diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -702,50 +702,6 @@
                   !eq(nf, 8): !if(signed, "vvvvvvvv", "UvUvUvUvUvUvUvUv"));
 }
 
-multiclass RVVIndexedSegStore<string op> {
-  foreach type = TypeList in {
-    foreach eew_info = EEWList in {
-      defvar eew = eew_info[0];
-      defvar eew_type = eew_info[1];
-      foreach nf = NFList in {
-        let Name = op # nf # "ei" # eew # "_v",
-            IRName = op # nf,
-            MaskedIRName = op # nf # "_mask",
-            NF = nf,
-            HasMaskedOffOperand = false,
-            MaskedPolicyScheme = NonePolicy,
-            ManualCodegen = [{
-    {
-      if (IsMasked) {
-        // Builtin: (mask, ptr, index, val0, val1, ..., vl)
-        // Intrinsic: (val0, val1, ..., ptr, index, mask, vl)
-        std::rotate(Ops.begin(), Ops.begin() + 3, Ops.end() - 1);
-        std::rotate(Ops.begin() + NF, Ops.begin() + NF + 1, Ops.begin() + NF + 3);
-        IntrinsicTypes = {Ops[0]->getType(),
-                          Ops[NF + 1]->getType(), Ops[NF + 3]->getType()};
-        assert(Ops.size() == NF + 4);
-      } else {
-        // Builtin: (ptr, index, val0, val1, ..., vl)
-        // Intrinsic: (val0, val1, ..., ptr, index, vl)
-        std::rotate(Ops.begin(), Ops.begin() + 2, Ops.end() - 1);
-        IntrinsicTypes = {Ops[0]->getType(),
-                          Ops[NF + 1]->getType(), Ops[NF + 2]->getType()};
-        assert(Ops.size() == NF + 3);
-      }
-    }
-    }] in {
-          defvar V = VString<nf, true>.S;
-          defvar UV = VString<nf, false>.S;
-          def : RVVBuiltin<"v", "0Pe" # eew_type # "Uv" # V, type>;
-          if !not(IsFloat<type>.val) then {
-            def : RVVBuiltin<"Uv", "0PUe" # eew_type # "Uv" # UV, type>;
-          }
-        }
-      }
-    }
-  }
-}
-
 multiclass RVVPseudoUnaryBuiltin<string IR, string type_range> {
   let Name = NAME,
       IRName = IR,
@@ -1504,8 +1460,7 @@
       defvar eew = eew_info[0];
       defvar eew_type = eew_info[1];
       foreach nf = NFList in {
-        let Name = op # nf # "ei" # eew # "_v_tuple",
-            OverloadedName = op # nf # "ei" # eew # "_tuple",
+        let Name = op # nf # "ei" # eew # "_v",
             IRName = op # nf,
             MaskedIRName = op # nf # "_mask",
             NF = nf,
@@ -1569,13 +1524,6 @@
 defm : RVVIndexedSegStoreTuple<"vsoxseg">;
 }
 
-
-let UnMaskedPolicyScheme = NonePolicy,
-    MaskedPolicyScheme = NonePolicy in {
-defm : RVVIndexedSegStore<"vsuxseg">;
-defm : RVVIndexedSegStore<"vsoxseg">;
-}
-
 // 12. Vector Integer Arithmetic Instructions
 // 12.1. Vector Single-Width Integer Add and Subtract
 let UnMaskedPolicyScheme = HasPassthruOperand in {
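Note: the TableGen change above retires the standalone RVVIndexedSegStore builtins and gives the tuple-based RVVIndexedSegStoreTuple definitions the plain "_v" names (the "_tuple" suffix is dropped). For users, each indexed segment store now takes one NF-field tuple value instead of NF separate vector operands. A minimal migration sketch in C, assuming the vundefined/vset tuple helpers of this intrinsics version are available (those helper names are not part of this patch):

  #include <riscv_vector.h>

  // Pack two vfloat16mf4_t values into the x2 tuple the renamed builtin
  // expects. __riscv_vundefined_f16mf4x2 and __riscv_vset_v_f16mf4_f16mf4x2
  // are assumed helper names from the same intrinsics generation.
  void store_segment_pair(_Float16 *base, vuint16mf4_t bindex,
                          vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
    vfloat16mf4x2_t v_tuple = __riscv_vundefined_f16mf4x2();
    v_tuple = __riscv_vset_v_f16mf4_f16mf4x2(v_tuple, 0, v0);  // field 0
    v_tuple = __riscv_vset_v_f16mf4_f16mf4x2(v_tuple, 1, v1);  // field 1
    // Previously: __riscv_vsoxseg2ei16_v_f16mf4(base, bindex, v0, v1, vl);
    __riscv_vsoxseg2ei16_v_f16mf4x2(base, bindex, v_tuple, vl);
  }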
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei16.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei16.c
@@ -7,963 +7,1347 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[TMP2]], <vscale x 1 x half> [[TMP3]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei16_v_f16mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16mf4x2(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei16_v_f16mf4x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0]], <vscale x 2 x half> [[V1]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[TMP2]], <vscale x 2 x half> [[TMP3]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei16_v_f16mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16mf2x2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei16_v_f16mf2x2(base, bindex, v_tuple, vl);
 }
 
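The updated CHECK lines show how a tuple argument is passed: vfloat16mf4x2_t arrives as two scalable-vector parameters (V_TUPLE_COERCE0/1), the frontend rebuilds the two-field aggregate with insertvalue, and extractvalue then feeds the fields to the unchanged llvm.riscv.vsoxseg2 intrinsic, so the emitted vector code is the same as before the rename. In typical use the tuple comes straight from an indexed segment load; a sketch in C, assuming the tuple form of vloxseg2ei16 from the same patch series is available:

  #include <riscv_vector.h>

  // Gather two-field segments through one index vector and scatter them
  // back out: the x2 tuple flows from the load to the store with no
  // per-field handling. The vloxseg2ei16 tuple builtin is assumed to exist
  // alongside the store builtin updated here.
  void copy_segments(_Float16 *dst, const _Float16 *src,
                     vuint16mf4_t bindex, size_t vl) {
    vfloat16mf4x2_t v_tuple = __riscv_vloxseg2ei16_v_f16mf4x2(src, bindex, vl);
    __riscv_vsoxseg2ei16_v_f16mf4x2(dst, bindex, v_tuple, vl);
  }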
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0]], <vscale x 4 x half> [[V1]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[TMP2]], <vscale x 4 x half> [[TMP3]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei16_v_f16m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16m1x2(_Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei16_v_f16m1x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[V0]], <vscale x 8 x half> [[V1]], ptr [[BASE]], <vscale x 8 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half> } poison, <vscale x 8 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], <vscale x 8 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[TMP2]], <vscale x 8 x half> [[TMP3]], ptr [[BASE]], <vscale x 8 x i16> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei16_v_f16m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16m2x2(_Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei16_v_f16m2x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 16 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i16.i64(<vscale x 16 x half> [[V0]], <vscale x 16 x half> [[V1]], ptr [[BASE]], <vscale x 16 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x half>, <vscale x 16 x half> } poison, <vscale x 16 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], <vscale x 16 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void
@llvm.riscv.vsoxseg2.nxv16f16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_f16m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_f16m4x2(_Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_f16m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_f32mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_f32mf2x2(float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_f32mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_f32m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_f32m1x2(float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_f32m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg2ei16_v_f32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_f32m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_f32m2x2(float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_f32m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_f32m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_f32m4x2(float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_f32m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_f64m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_f64m1x2(double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_f64m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_f64m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_f64m2x2(double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_f64m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_f64m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_f64m4x2(double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_f64m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_i8mf8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_i8mf8x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_i8mf4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_i8mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t 
v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_i8mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_i8mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_i8m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_i8m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_i8m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_i8m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i16.i64( [[V0]], [[V1]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_i8m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_i8m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_i16mf4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_i16mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_i16mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, 
vint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_i16mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_i16m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_i16m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_i16m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_i16m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_i16m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_i16m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_i32mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_i32mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_i32m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_i32m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg2ei16_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_i32m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_i32m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_i32m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_i32m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_i64m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_i64m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_i64m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_i64m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_i64m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_i64m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], 
[[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_u8mf8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_u8mf8x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_u8mf4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_u8mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.nxv4i8.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_u8mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_u8mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_u8m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_u8m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_u8m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_u8m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m4x2 +// CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_u8m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_u8m4x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_u16mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_u16mf4x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_u16mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_u16mf2x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_u16m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_u16m1x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_u16m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_u16m2x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_u16m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_u16m4x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_u32mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_u32mf2x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_u32m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_u32m1x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_u32m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_u32m2x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_u32m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_u32m4x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_u64m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_u64m1x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_u64m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_u64m2x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_u64m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_u64m4x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_f16mf4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_f16mf2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_f16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_f16m1x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_f16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_f16m2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_f16m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_f16m4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_f32mf2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_f32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f32m1x2_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_f32m1x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_f32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f32m2x2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_f32m2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_f32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f32m4x2_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_f32m4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_f64m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f64m1x2_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_f64m1x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_f64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f64m2x2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_f64m2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_f64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f64m4x2_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_f64m4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_i8mf8x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_i8mf4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_i8mf2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_i8m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_i8m1x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_i8m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_i8m2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_i8m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_i8m4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_i16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_i16mf4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_i16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_i16mf2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_i16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_i16m1x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_i16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_i16m2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_i16m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_i16m4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_i32mf2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_i32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_i32m1x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_i32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_i32m2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_i32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_i32m4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_i64m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_i64m1x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_i64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_i64m2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_i64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_i64m4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_u8mf8x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_u8mf4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_u8mf2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_u8m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_u8m1x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_u8m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_u8m2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_u8m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_u8m4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_u16mf4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , }
poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_u16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_u16mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_u16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_u16m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, 
vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_u16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_u16m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_u16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_u16m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_u32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_u32mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_u32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_u32m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_u32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_u32m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_u32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_u32m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_u64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_u64m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - 
return __riscv_vsoxseg2ei16_v_u64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_u64m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16_v_u64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16_v_u64m4x2_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei32.c @@ -7,923 +7,1291 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_f16mf4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, 
+  return __riscv_vsoxseg2ei32_v_f16mf4x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0]], <vscale x 2 x half> [[V1]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[TMP2]], <vscale x 2 x half> [[TMP3]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_f16mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_f16mf2x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0]], <vscale x 4 x half> [[V1]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[TMP2]], <vscale x 4 x half> [[TMP3]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_f16m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_f16m1x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[V0]], <vscale x 8 x half> [[V1]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half> } poison, <vscale x 8 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], <vscale x 8 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[TMP2]], <vscale x 8 x half> [[TMP3]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_f16m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_f16m2x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 16 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[V0]], <vscale x 16 x half> [[V1]], ptr [[BASE]], <vscale x 16 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x half>, <vscale x 16 x half> } poison, <vscale x 16 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], <vscale x 16 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[TMP2]], <vscale x 16 x half> [[TMP3]], ptr [[BASE]], <vscale x 16 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_f16m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_f16m4x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[V0]], <vscale x 1 x float> [[V1]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[TMP2]], <vscale x 1 x float> [[TMP3]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_f32mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_f32mf2x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[V0]], <vscale x 2 x float> [[V1]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[TMP2]], <vscale x 2 x float> [[TMP3]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_f32m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_f32m1x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x float> [[V0:%.*]], <vscale x 4 x float> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[V0]], <vscale x 4 x float> [[V1]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } poison, <vscale x 4 x float> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], <vscale x 4 x float> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_f32m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_f32m2x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x float> [[V0:%.*]], <vscale x 8 x float> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[V0]], <vscale x 8 x float> [[V1]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } poison, <vscale x 8 x float> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], <vscale x 8 x float> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[TMP2]], <vscale x 8 x float> [[TMP3]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_f32m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_f32m4x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x double> [[V0:%.*]], <vscale x 1 x double> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[V0]], <vscale x 1 x double> [[V1]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[TMP2]], <vscale x 1 x double> [[TMP3]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_f64m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_f64m1x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x double> [[V0:%.*]], <vscale x 2 x double> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x double> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x double> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[V0]], <vscale x 2 x double> [[V1]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double> } poison, <vscale x 2 x double> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], <vscale x 2 x double> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[TMP2]], <vscale x 2 x double> [[TMP3]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_f64m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_f64m2x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x double> [[V0:%.*]], <vscale x 4 x double> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x double> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x double> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[V0]], <vscale x 4 x double> [[V1]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x double>, <vscale x 4 x double> } poison, <vscale x 4 x double> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], <vscale x 4 x double> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[TMP2]], <vscale x 4 x double> [[TMP3]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_f64m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_f64m4x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf8x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[V0]], <vscale x 1 x i8> [[V1]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8> [[TMP3]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_i8mf8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_i8mf8x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[V0]], <vscale x 2 x i8> [[V1]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8> [[TMP3]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_i8mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_i8mf4x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[V0]], <vscale x 4 x i8> [[V1]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8> [[TMP3]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_i8mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_i8mf2x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[V0]], <vscale x 8 x i8> [[V1]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8> [[TMP3]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_i8m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_i8m1x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[V0]], <vscale x 16 x i8> [[V1]], ptr [[BASE]], <vscale x 16 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8> [[TMP3]], ptr [[BASE]], <vscale x 16 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_i8m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_i8m2x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[V0]], <vscale x 1 x i16> [[V1]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16> [[TMP3]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_i16mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_i16mf4x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[V0]], <vscale x 2 x i16> [[V1]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16> [[TMP3]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_i16mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_i16mf2x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[V0]], <vscale x 4 x i16> [[V1]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16> [[TMP3]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_i16m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_i16m1x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i16> [[V0:%.*]], <vscale x 8 x i16> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[V0]], <vscale x 8 x i16> [[V1]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_i16m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_i16m2x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i16> [[V0:%.*]], <vscale x 16 x i16> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 16 x i16> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[V0]], <vscale x 16 x i16> [[V1]], ptr [[BASE]], <vscale x 16 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } poison, <vscale x 16 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], <vscale x 16 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16> [[TMP3]], ptr [[BASE]], <vscale x 16 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_i16m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_i16m4x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[V0]], <vscale x 1 x i32> [[V1]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_i32mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_i32mf2x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[V0]], <vscale x 2 x i32> [[V1]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_i32m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_i32m1x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[V0]], <vscale x 4 x i32> [[V1]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_i32m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_i32m2x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[V0:%.*]], <vscale x 8 x i32> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[V0]], <vscale x 8 x i32> [[V1]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_i32m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_i32m4x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64> [[TMP3]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_i64m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_i64m1x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[V0]], <vscale x 2 x i64> [[V1]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_i64m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_i64m2x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[V0:%.*]], <vscale x 4 x i64> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i64> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[V0]], <vscale x 4 x i64> [[V1]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } poison, <vscale x 4 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], <vscale x 4 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64> [[TMP3]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_i64m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_i64m4x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf8x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[V0]], <vscale x 1 x i8> [[V1]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8> [[TMP3]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_u8mf8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_u8mf8x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[V0]], <vscale x 2 x i8> [[V1]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8> [[TMP3]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei32_v_u8mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei32_v_u8mf4x2(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[V0]], <vscale x 4 x i8> [[V1]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:
[[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u8mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u8mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u8m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u8m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u8m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u8m2x2(base, bindex, 
v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u16mf4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u16mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u16mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u16mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u16m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u16m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u16m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u16m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u16m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u16m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32mf2 -// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u32mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u32mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u32m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u32m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u32m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u32m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u32m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u32m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u64m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u64m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u64m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u64m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u64m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u64m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_f16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_f16mf4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_f16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_f16mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_f16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t 
vl) { + return __riscv_vsoxseg2ei32_v_f16m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_f16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_f16m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_f16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_f16m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_f32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_f32mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_f32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_f32m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_f32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_f32m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_f32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_f32m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_f64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_f64m1x2_m(mask, base, 
bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_f64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_f64m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_f64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_f64m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf8x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_i8mf8_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_i8mf8x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_i8mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_i8mf4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_i8mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_i8mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_i8m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_i8m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_i8m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_i8m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_i16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_i16mf4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_i16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_i16mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_i16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_i16m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_i16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_i16m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16m4_m(vbool4_t mask, int16_t 
*base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_i16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_i16m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_i32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_i32mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_i32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_i32m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_i32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_i32m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_i32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_i32m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_i64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_i64m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_i64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_i64m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return 
__riscv_vsoxseg2ei32_v_i64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_i64m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf8x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u8mf8_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u8mf8x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u8mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u8mf4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg2ei32_v_u8mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u8mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u8mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u8m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u8m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u8m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u8m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u16mf4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u16mf2_m(mask, base, bindex, v0, v1, vl); +void 
test_vsoxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u16mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u16m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u16m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u16m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u32mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue 
{ , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u32m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u32m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t 
*base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u32m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u64m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u64m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32_v_u64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32_v_u64m4x2_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei32_tuple.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei32_tuple.c deleted file mode 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei32_tuple.c +++ /dev/null @@ -1,1297 +0,0 @@ -// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 -// REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ -// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ -// RUN: FileCheck --check-prefix=CHECK-RV64 %s - -#include - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_f16mf4x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// 
CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_f16mf2x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_f16m1x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_f16m2x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_f16m4x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32mf2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.nxv1f32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_f32mf2x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_f32m1x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_f32m2x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_f32m4x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_f64m1x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_f64m2x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_f64m4x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf8x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_i8mf8x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_i8mf4x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_i8mf2x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_i8m1x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_i8m2x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] 
= insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_i16mf4x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_i16mf2x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_i16m1x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_i16m2x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i16m4x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i32mf2x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i32m1x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i32m2x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i32m4x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i64m1x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i64m2x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i64m4x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf8x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u8mf8x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u8mf4x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u8mf2x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u8m1x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u8m2x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u16mf4x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u16mf2x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u16m1x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u16m2x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u16m4x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u32mf2x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u32m1x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u32m2x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u32m4x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u64m1x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u64m2x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u64m4x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_f16mf4x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_f16mf2x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_f16m1x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_f16m2x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_f16m4x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32mf2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_f32mf2x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_f32m1x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_f32m2x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_f32m4x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_f64m1x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_f64m2x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_f64m4x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf8x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i8mf8x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i8mf4x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i8mf2x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i8m1x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i8m2x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i16mf4x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i16mf2x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i16m1x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i16m2x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i16m4x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32mf2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i32mf2x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i32m1x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i32m2x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i32m4x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i64m1x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i64m2x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_i64m4x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf8x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u8mf8x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u8mf4x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u8mf2x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u8m1x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u8m2x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u16mf4x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u16mf2x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u16m1x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u16m2x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u16m4x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32mf2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u32mf2x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u32m1x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_v_tuple_u32m2x2_m(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_u32m4x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_u64m1x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_u64m2x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_v_tuple_u64m4x2_m(mask, base, bindex, v_tuple, vl); -} - diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei64.c @@ -7,823 +7,1151 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_f16mf4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f16mf4x2(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_f16mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_f16mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f16mf2x2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_f16mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_f16m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f16m1x2(_Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_f16m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_f16m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f16m2x2(_Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_f16m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_f32mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f32mf2x2(float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_f32mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_f32m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f32m1x2(float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_f32m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_f32m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f32m2x2(float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_f32m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.nxv8f32.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_f32m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f32m4x2(float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_f32m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_f64m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f64m1x2(double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_f64m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_f64m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f64m2x2(double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_f64m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m4x2 +// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_f64m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f64m4x2(double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_f64m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i8mf8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i8mf8x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsoxseg2ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i8mf4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i8mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i8mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i8mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i8m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i8m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i16mf4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i16mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i16mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i16mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return 
__riscv_vsoxseg2ei64_v_i16m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i16m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i16m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i16m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i32mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i32mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i64.i64( [[V0]], 
[[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i32m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i32m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i32m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i32m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i32m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, 
vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i32m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i64m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i64m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i64m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i64m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i64m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i64m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u8mf8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u8mf8x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u8mf4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u8mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local 
void @test_vsoxseg2ei64_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u8mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u8mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u8m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u8m1x2(uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u8m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u16mf4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u16mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u16mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u16mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u16m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u16m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u16m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u16m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u32mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u32mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.nxv2i32.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u32m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u32m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u32m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u32m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u32m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u32m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m1x2 +// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u64m1(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u64m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u64m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u64m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: 
ret void // -void test_vsoxseg2ei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u64m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u64m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_f16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_f16mf4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_f16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_f16mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_f16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_f16m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_f16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_f16m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_f32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_f32mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_f32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f32m1x2_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_f32m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t 
bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_f32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f32m2x2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_f32m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_f32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f32m4x2_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_f32m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_f64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f64m1x2_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_f64m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_f64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f64m2x2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_f64m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_f64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f64m4x2_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_f64m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf8x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i8mf8_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i8mf8x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i8mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i8mf4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return 
__riscv_vsoxseg2ei64_v_i8mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i8mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i8m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i8m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i16mf4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg2ei64_v_i16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i16mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i16m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i16m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i32mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i32m1_m(mask, base, bindex, v0, v1, 
vl); +void test_vsoxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i32m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i32m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i32m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i64m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i64m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_i64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_i64m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf8x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u8mf8_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u8mf8x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u8mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, 
vuint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u8mf4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u8mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u8mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u8m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u8m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u16mf4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u16mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u16m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64_v_u16m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64_v_u32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { + return 
__riscv_vsoxseg2ei64_v_u32mf2x2_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64_v_u32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64_v_u32m1x2_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64_v_u32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64_v_u32m2x2_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64_v_u32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64_v_u32m4x2_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64_v_u64m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64_v_u64m1x2_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64_v_u64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64_v_u64m2x2_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64_v_u64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64_v_u64m4x2_m(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei8.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg2ei8.c
@@ -7,963 +7,1347 @@
 #include 
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f16mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f16mf4x2(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_f16mf4x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f16mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f16mf2x2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_f16mf2x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f16m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f16m1x2(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_f16m1x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f16m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f16m2x2(_Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_f16m2x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f16m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f16m4x2(_Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_f16m4x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f32mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f32mf2x2(float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_f32mf2x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f32m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f32m1x2(float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_f32m1x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f32m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f32m2x2(float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_f32m2x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f32m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f32m4x2(float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_f32m4x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f64m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f64m1x2(double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_f64m1x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f64m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f64m2x2(double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_f64m2x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f64m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f64m4x2(double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_f64m4x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf8x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i8mf8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i8mf8x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i8mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i8mf4x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i8mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i8mf2x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i8m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i8m1x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i8m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i8m2x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i8m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i8m4x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i16mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i16mf4x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i16mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i16mf2x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i16m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i16m1x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i16m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i16m2x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i16m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i16m4x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i32mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i32mf2x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i32m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i32m1x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i32m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i32m2x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i32m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i32m4x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i64m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i64m1x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i64m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i64m2x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i64m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i64m4x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf8x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u8mf8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u8mf8x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u8mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u8mf4x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u8mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u8mf2x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u8m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u8m1x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u8m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u8m2x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u8m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u8m4x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u16mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u16mf4x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u16mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u16mf2x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u16m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u16m1x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u16m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u16m2x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u16m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u16m4x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u32mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u32mf2x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u32m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u32m1x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u32m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u32m2x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u32m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u32m4x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u64m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u64m1x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
-  return
__riscv_vsoxseg2ei8_v_u64m2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8_v_u64m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei8_v_u64m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8_v_u64m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei8_v_f16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8_v_f16mf4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8_v_f16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8_v_f16mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei8_v_f16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8_v_f16m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8_v_f16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8_v_f16m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei8_v_f16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8_v_f16m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8_v_f32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8_v_f32mf2x2_m(mask, base, 

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f32m1x2_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_f32m1x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f32m2x2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_f32m2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f32m4x2_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_f32m4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f64m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f64m1x2_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_f64m1x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f64m2x2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_f64m2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_f64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f64m4x2_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_f64m4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i8mf8x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i8mf4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i8mf2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i8m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i8m1x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i8m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i8m2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i8m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i8m4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i16mf4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i16mf2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i16m1x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i16m2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i16m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i16m4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i32mf2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i32m1x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i32m2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i32m4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i64m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i64m1x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i64m2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_i64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_i64m4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u8mf8x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u8mf4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u8mf2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u8m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u8m1x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u8m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u8m2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u8m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u8m4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u16mf4x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u16mf2x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8_v_u16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8_v_u16m1x2_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8_v_u16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8_v_u16m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei8_v_u16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8_v_u16m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue 
{ , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8_v_u32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8_v_u32mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei8_v_u32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8_v_u32m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, 
vuint32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8_v_u32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8_v_u32m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei8_v_u32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8_v_u32m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei8_v_u64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8_v_u64m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg2ei8_v_u64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8_v_u64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8_v_u64m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei8_v_u64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8_v_u64m4x2_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei16.c @@ -7,743 +7,1187 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_f16mf4(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f16mf4x3(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_f16mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_f16mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f16mf2x3(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_f16mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i16.i64( [[V0]], 
[[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_f16m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f16m1x3(_Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_f16m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_f16m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f16m2x3(_Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_f16m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_f32mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f32mf2x3(float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_f32mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_f32m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f32m1x3(float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_f32m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_f32m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f32m2x3(float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_f32m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_f64m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f64m1x3(double *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_f64m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, 
vfloat64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_f64m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f64m2x3(double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_f64m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i8mf8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i8mf8x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i8mf4(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i8mf4x3(base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i8mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i8mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i8m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i8m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m2x3 +// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i8m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i8m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i16mf4(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i16mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i16mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i16mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i16m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i16m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i16m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i16m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i32mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i32mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.nxv2i32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i32m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i32m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i32m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i32m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return 
__riscv_vsoxseg3ei16_v_i64m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i64m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i64m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i64m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u8mf8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u8mf8x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg3ei16_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u8mf4(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u8mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u8mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u8mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u8m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u8m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u8m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u8m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u16mf4(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u16mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u16mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u16mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u16m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u16m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u16m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u16m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.nxv1i32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u32mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u32mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u32m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u32m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t 
vl) { - return __riscv_vsoxseg3ei16_v_u32m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u32m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u64m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u64m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u64m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u64m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg3ei16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_f16mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_f16mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg3ei16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_f16m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_f16m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg3ei16_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_f32mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f32m1x3_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_f32m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m2_m -// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f32m2x3_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_f32m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f64m1x3_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_f64m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f64m2x3_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_f64m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i8mf8x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i8mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i8mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i8m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i8m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf4x3_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i16mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i16mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i16m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i16m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i32mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i32m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i32m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i64m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_i64m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u8mf8x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u8mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u8mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u8m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u8m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u16mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u16mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u16m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u16m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u32mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u32m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u32m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16_v_u64m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, 
<vscale x 2 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], <vscale x 2 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64> [[TMP5]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei16_v_u64m2x3_m(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei32.c
@@ -7,743 +7,1187 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[TMP3]], <vscale x 1 x half> [[TMP4]], <vscale x 1 x half> [[TMP5]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_f16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_f16mf4x3(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_f16mf4x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0]], <vscale x 2 x half> [[V1]], <vscale x 2 x half> [[V2]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[TMP3]], <vscale x 2 x half> [[TMP4]], <vscale x 2 x half> [[TMP5]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_f16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_f16mf2x3(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_f16mf2x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0]], <vscale x 4 x half> [[V1]], <vscale x 4 x half> [[V2]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], <vscale x 4 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[TMP3]], <vscale x 4 x half> [[TMP4]], <vscale x 4 x half> [[TMP5]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_f16m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_f16m1x3(_Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_f16m1x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 8 x half> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[V0]], <vscale x 8 x half> [[V1]], <vscale x 8 x half> [[V2]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } poison, <vscale x 8 x half>
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_f16m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f16m2x3(_Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_f16m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_f32mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f32mf2x3(float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_f32mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } 
[[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_f32m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f32m1x3(float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_f32m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_f32m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f32m2x3(float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_f32m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_f64m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f64m1x3(double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_f64m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_f64m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f64m2x3(double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_f64m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_i8mf8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i8mf8x3(int8_t 
*base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_i8mf8x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_i8mf4(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_i8mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_i8mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_i8mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_i8m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_i8m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_i8m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_i8m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_i16mf4(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_i16mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_i16mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_i16mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_i16m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_i16m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_i16m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_i16m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } 
[[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_i32mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_i32mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_i32m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_i32m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg3ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_i32m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_i32m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_i64m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_i64m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_i64m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t 
vl) { + return __riscv_vsoxseg3ei32_v_i64m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_u8mf8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_u8mf8x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_u8mf4(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_u8mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_u8mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_u8mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_u8m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_u8m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_u8m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_u8m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_u16mf4(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_u16mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_u16mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_u16mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_u16m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_u16m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_u16m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_u16m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_u32mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_u32mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u32m1(uint32_t 
*base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_u32m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_u32m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_u32m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_u32m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_u64m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { + return 
__riscv_vsoxseg3ei32_v_u64m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_u64m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_u64m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_f16mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf2_m -// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_f16mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_f16m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m2_m -// CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_f16m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_f32mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f32m1x3_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_f32m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f32m2x3_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_f32m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f64m1x3_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_f64m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f64m2x3_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_f64m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_i8mf8x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_i8mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local 
void @test_vsoxseg3ei32_v_i8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_i8mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_i8m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_i8m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_i16mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_i16mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32_v_i16m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_i16m2x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_i32mf2x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_i32m1x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_i32m2x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_i64m1x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_i64m2x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf8x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_u8mf8x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_u8mf4x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_u8mf2x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_u8m1x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_u8m2x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_u16mf4x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_u16mf2x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_u16m1x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_u16m2x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_u32mf2x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_u32m1x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_u32m2x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_u64m1x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32_v_u64m2x3_m(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei64.c
@@ -7,703 +7,1123 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_f16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_f16mf4x3(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_f16mf4x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_f16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_f16mf2x3(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_f16mf2x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_f16m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_f16m1x3(_Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_f16m1x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_f16m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_f16m2x3(_Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_f16m2x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_f32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_f32mf2x3(float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_f32mf2x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_f32m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_f32m1x3(float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_f32m1x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_f32m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_f32m2x3(float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_f32m2x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_f64m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_f64m1x3(double *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_f64m1x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_f64m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_f64m2x3(double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_f64m2x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf8x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_i8mf8(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_i8mf8x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_i8mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_i8mf4x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_i8mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_i8mf2x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_i8m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_i8m1x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_i16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_i16mf4x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_i16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_i16mf2x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_i16m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_i16m1x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_i16m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_i16m2x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_i32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_i32mf2x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei64_v_i32m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei64_v_i32m1x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
//
CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64_v_i32m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64_v_i32m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64_v_i64m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64_v_i64m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64_v_i64m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t 
bindex, vint64m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_i64m2x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf8x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_u8mf8(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_u8mf8x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_u8mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_u8mf4x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_u8mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_u8mf2x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_u8m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_u8m1x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_u16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_u16mf4x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_u16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_u16mf2x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_u16m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_u16m1x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_u16m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_u16m2x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]]
= extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_u32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_u32mf2x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_u32m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_u32m1x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_u32m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_u32m2x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_u64m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_u64m1x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_u64m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_u64m2x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_f16mf4x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_f16mf2x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_f16m1x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) {
+ return
__riscv_vsoxseg3ei64_v_f16m2x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_f32mf2x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_f32m1x3_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_f32m1x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_f32m2x3_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_f32m2x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_f64m1x3_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_f64m1x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_f64m2x3_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_f64m2x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf8x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_i8mf8x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_i8mf4x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_i8mf2x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]],
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_i8m1x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_i16mf4x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_i16mf2x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_i16m1x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_i16m2x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_i32mf2x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_i32m1x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_i32m2x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m1x3_m
+//
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64_v_i64m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64_v_i64m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vsoxseg3ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64_v_u8mf8x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64_v_u8mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64_v_u8mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64_v_u8m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64_v_u16mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64_v_u16mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64_v_u16m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64_v_u16m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64_v_u32mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64_v_u32m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64_v_u32m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64_v_u64m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( 
[[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64_v_u64m2x3_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg3ei8.c @@ -7,743 +7,1187 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_f16mf4(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f16mf4x3(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_f16mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf2x3 +// CHECK-RV64-SAME: 
(ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_f16mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f16mf2x3(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_f16mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_f16m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f16m1x3(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_f16m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_f16m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f16m2x3(_Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_f16m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_f32mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f32mf2x3(float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_f32mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_f32m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f32m1x3(float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_f32m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_f32m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f32m2x3(float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_f32m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.nxv1f64.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_f64m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f64m1x3(double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_f64m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_f64m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f64m2x3(double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_f64m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return 
__riscv_vsoxseg3ei8_v_i8mf8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_i8mf8x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_i8mf4(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_i8mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_i8mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_i8mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m1 -// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_i8m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_i8m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_i8m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_i8m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_i16mf4(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i16mf4x3(int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_i16mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_i16mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_i16mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, 
[[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i16m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i16m1x3(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i16m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i16m2x3(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i32mf2x3(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i32m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i32m1x3(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i32m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i32m2x3(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i64m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i64m1x3(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i64m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i64m2x3(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf8x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u8mf8(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u8mf8x3(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u8mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u8mf4x3(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u8mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u8mf2x3(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u8m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u8m1x3(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u8m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u8m2x3(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u16mf4x3(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u16mf2x3(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u16m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u16m1x3(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u16m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u16m2x3(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u32mf2x3(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u32m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u32m1x3(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u32m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u32m2x3(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u64m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u64m1x3(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_u64m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_u64m2x3(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_f16mf4x3_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_f16mf2x3_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_f16m1x3_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_f16m2x3_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_f32mf2x3_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_f32m1x3_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_f32m1x3_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_f32m2x3_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) {
  return __riscv_vsoxseg3ei8_v_f32m2x3_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_f64m1x3_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_f64m1x3_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_f64m2x3_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_f64m2x3_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf8x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i8mf8x3_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i8mf4x3_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i8mf2x3_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i8m1x3_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i8m2x3_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) {
  return __riscv_vsoxseg3ei8_v_i16mf4x3_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i16mf2x3_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i16m1x3_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i16m2x3_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i32mf2x3_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i32m1x3_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei8_v_i32m2x3_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]],
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_i64m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_i64m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , 
, } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_u8mf8x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_u8mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] 
= extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_u8mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_u8m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue 
{ , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_u8m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_u16mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_u16mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_u16m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_u16m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_u32mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( 
[[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_u32m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_u32m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_u64m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8_v_u64m2x3_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei16.c @@ -7,743 +7,1335 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] =
insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f16mf4x4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_f16mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f16mf2x4(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_f16mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_f16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f16m1x4(_Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_f16m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_f16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f16m2x4(_Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_f16m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f32mf2x4(float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_f32mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_f32m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f32m1x4(float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_f32m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_f32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f32m2x4(float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_f32m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_f64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f64m1x4(double *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t 
vl) { + return __riscv_vsoxseg4ei16_v_f64m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_f64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f64m2x4(double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_f64m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, 
vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i8mf8x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i8mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16.i64( 
[[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i8mf2x4(int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i8mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i8m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i8m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i8m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i8m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i16mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i16mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i16m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i16m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i32mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i32m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i32m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i32m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m1x4 +// CHECK-RV64-SAME: 
(ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i64m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i64m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u8mf8x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t 
bindex, vuint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u8mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u8mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, 
vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u8m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u8m1x4(uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u8m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u8m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u8m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.nxv1i16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u16mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u16mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u16m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u16m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u32mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u32m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u32m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u32m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u64m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u64m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_f16mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg4ei16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_f16mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg4ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_f16m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_f16m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_f32mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f32m1x4_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_f32m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i16.i64( 
[[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f32m2x4_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_f32m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f64m1x4_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_f64m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f64m2x4_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_f64m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return 
__riscv_vsoxseg4ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i8mf8x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i8mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i8mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i8m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i8m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i16mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i16mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return 
__riscv_vsoxseg4ei16_v_i16m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i16m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i32mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i32m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i32m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i64m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_i64m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u8mf8x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u8mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, 
vuint8mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u8mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u8m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u8m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u16mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u16mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u16m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m2x4_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u16m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u32mf2x4_m(vbool64_t mask, 
uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u32mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u32m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u32m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u64m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16_v_u64m2x4_m(mask, base, bindex, v_tuple, vl); }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei32.c
@@ -7,743 +7,1335 @@
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f16mf4x4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_f16mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]],
[[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f16mf2x4(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_f16mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_f16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f16m1x4(_Float16 *base, 
vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_f16m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_f16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f16m2x4(_Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_f16m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f32mf2(float 
*base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f32mf2x4(float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_f32mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_f32m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f32m1x4(float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_f32m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } 
[[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_f32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f32m2x4(float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_f32m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_f64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f64m1x4(double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_f64m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { 
, , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_f64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f64m2x4(double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_f64m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_i8mf8x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i8mf4x4(int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_i8mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_i8mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_i8m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_i8m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_i8m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_i8m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i16mf4x4(int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_i16mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_i16mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg4ei32_v_i16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_i16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_i16m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_i16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_i16m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg4ei32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_i32mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_i32m1(base, bindex, v0, v1, v2, v3, vl); 
+void test_vsoxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_i32m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_i32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_i32m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg4ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_i64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_i64m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_i64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_i64m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , 
, , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_u8mf8x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u8mf4x4(uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_u8mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , 
, , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_u8mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_u8m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_u8m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] 
= insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_u8m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_u8m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_u16mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_u16mf2x4(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_u16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_u16m1x4(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_u16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_u16m2x4(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_u32mf2x4(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_u32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_u32m1x4(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_u32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_u32m2x4(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_u64m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_u64m1x4(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_u64m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_u64m2x4(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_f16mf4x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_f16mf2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_f16m1x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_f16m2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_f32mf2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_f32m1x4_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_f32m1x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_f32m2x4_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_f32m2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_f64m1x4_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_f64m1x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_f64m2x4_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_f64m2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf8x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_i8mf8x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_i8mf4x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_i8mf2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_i8m1x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_i8m2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_i16mf4x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_i16mf2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_i16m1x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_i16m2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_i32mf2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_i32m1x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_i32m2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_i64m1x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_i64m2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf8x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_u8mf8x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_u8mf4x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_u8mf2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_u8m1x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_u8m2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_u16mf4x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
+ return
__riscv_vsoxseg4ei32_v_u16mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_u16m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_u16m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_u32mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] 
= insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_u32m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32_v_u32m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_u64m1x4_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_u64m2x4_m(mask, base, bindex, v_tuple, vl);
}
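The tests above show the shape of the rename at the call site: the four discrete vector operands (v0..v3) become a single tuple value, so callers construct or obtain one vuint8mf8x4_t (etc.) instead of passing four registers. A minimal caller-side sketch of the new form, assuming the tuple-typed unit-stride segment load __riscv_vlseg4e8_v_u8mf8x4 from the same intrinsics revision (only the store, __riscv_vsoxseg4ei64_v_u8mf8x4, appears verbatim in this patch):

#include <riscv_vector.h>

// Illustrative only: load four interleaved byte fields as one tuple,
// then scatter them with an ordered indexed segment store.
void scatter_seg4(uint8_t *dst, const uint8_t *src, vuint64m1_t bindex,
                  size_t vl) {
  // One tuple value replaces the former v0, v1, v2, v3 arguments.
  vuint8mf8x4_t v_tuple = __riscv_vlseg4e8_v_u8mf8x4(src, vl);
  __riscv_vsoxseg4ei64_v_u8mf8x4(dst, bindex, v_tuple, vl);
}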
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei64.c
@@ -7,703 +7,1263 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_f16mf4x4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_f16mf4x4(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] =
extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_f16mf2x4(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_f16mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_f16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_f16m1x4(_Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_f16m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_f16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_f16m2x4(_Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_f16m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_f32mf2x4(float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_f32mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } 
poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_f32m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_f32m1x4(float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_f32m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_f32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_f32m2x4(float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_f32m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_f64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_f64m1x4(double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_f64m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_f64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_f64m2x4(double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_f64m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg4ei64_v_i8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_i8mf8x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_i8mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg4ei64_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_i8mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_i8m1(base, bindex, v0, v1, v2, v3, vl); +void 
test_vsoxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_i8m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_i16mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsoxseg4ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_i16mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_i16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_i16m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_i16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_i16m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_i32mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_i32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_i32m1x4(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_i32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_i32m2x4(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_i64m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_i64m1x4(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_i64m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_i64m2x4(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf8x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_u8mf8x4(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_u8mf4x4(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_u8mf2x4(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_u8m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_u8m1x4(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_u16mf4x4(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u16mf2x4(uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_u16mf2x4(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_u16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_u16m1x4(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_u16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_u16m2x4(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_u32mf2x4(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_u32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_u32m1x4(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_u32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_u32m2x4(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_u64m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_u64m1x4(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_u64m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_u64m2x4(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_f16mf4x4_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_f16mf2x4_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_f16m1x4_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_f16m2x4_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_f32mf2x4_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_f32m1x4_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_f32m1x4_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_f32m2x4_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_f32m2x4_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_f64m1x4_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_f64m1x4_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_f64m2x4_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_f64m2x4_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf8x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_i8mf8x4_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_i8mf4x4_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_i8mf2x4_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_i8m1x4_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_i16mf4x4_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_i16mf2x4_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_i16m1x4_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_i16m2x4_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_i32mf2x4_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m1x4_m
+// CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_i32m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, 
vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_i32m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_i64m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64( 
[[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_i64m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_u8mf8x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_u8mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_u8mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_u8m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_u16mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf2_m -// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_u16mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u16m1_m(vbool16_t mask, uint16_t 
*base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_u16m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_u16m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_u32mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_u32m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_u32m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64_v_u64m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m2x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[V0]], <vscale x 2 x i64> [[V1]], <vscale x 2 x i64> [[V2]], <vscale x 2 x i64> [[V3]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], <vscale x 2 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], <vscale x 2 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64> [[TMP5]], <vscale x 2 x i64> [[TMP6]], <vscale x 2 x i64> [[TMP7]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64_v_u64m2x4_m(mask, base, bindex, v_tuple, vl);
 }
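For readers tracking the rename, here is a minimal usage sketch of the tuple-based segment-store API that these autogenerated tests exercise. The wrapper functions store_fields and store_fields_m are hypothetical; the intrinsic names, tuple types, and argument order are taken verbatim from the tests in this patch:

#include <riscv_vector.h>

/* Unmasked: the four int8 segment fields now travel as one vint8m1x4_t
   tuple instead of four separate vint8m1_t arguments (the pre-patch
   signature). Field i of element j is stored at base + bindex[j] + i. */
void store_fields(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple,
                  size_t vl) {
  __riscv_vsoxseg4ei8_v_i8m1x4(base, bindex, v_tuple, vl);
}

/* Masked: same shape with a leading vbool8_t mask and 64-bit indices,
   mirroring test_vsoxseg4ei64_v_i8m1x4_m above. */
void store_fields_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
                    vint8m1x4_t v_tuple, size_t vl) {
  __riscv_vsoxseg4ei64_v_i8m1x4_m(mask, base, bindex, v_tuple, vl);
}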
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei8.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg4ei8.c
@@ -7,743 +7,1335 @@
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], <vscale x 1 x half> [[V3]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[TMP4]], <vscale x 1 x half> [[TMP5]], <vscale x 1 x half> [[TMP6]], <vscale x 1 x half> [[TMP7]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f16mf4x4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei8_v_f16mf4x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0]], <vscale x 2 x half> [[V1]], <vscale x 2 x half> [[V2]], <vscale x 2 x half> [[V3]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], <vscale x 2 x half> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[TMP4]], <vscale x 2 x half> [[TMP5]], <vscale x 2 x half> [[TMP6]], <vscale x 2 x half> [[TMP7]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f16mf2x4(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei8_v_f16mf2x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0]], <vscale x 4 x half> [[V1]], <vscale x 4 x half> [[V2]], <vscale x 4 x half> [[V3]], ptr [[BASE]], <vscale x 4 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], <vscale x 4 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], <vscale x 4 x half> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , 
, } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_f16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_f16m1x4(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_f16m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_f16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_f16m2x4(_Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_f16m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_f32mf2x4(float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_f32mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_f32m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_f32m1x4(float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_f32m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_f32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_f32m2x4(float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_f32m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_f64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_f64m1x4(double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_f64m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_f64m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f64m2x4(double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_f64m2x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf8x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_i8mf8x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_i8mf4x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_i8mf2x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_i8m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_i8m1x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_i8m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_i8m2x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_i16mf4x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_i16mf2x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_i16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_i16m1x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_i16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_i16m2x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_i32mf2x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_i32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_i32m1x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_i32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_i32m2x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_i64m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_i64m1x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_i64m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_i64m2x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf8x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_u8mf8x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_u8mf4x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_u8mf2x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_u8m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_u8m1x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_u8m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_u8m2x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_u16mf4x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_u16mf2x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_u16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_u16m1x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_u16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_u16m2x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_u32mf2x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_u32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_u32m1x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_u32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_u32m2x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_u64m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_u64m1x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_u64m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_u64m2x4(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_f16mf4x4_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_f16mf2x4_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_f16m1x4_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_f16m2x4_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_f32mf2x4_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f32m1x4_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_f32m1x4_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f32m2x4_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_f32m2x4_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f64m1x4_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_f64m1x4_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f64m2x4_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_f64m2x4_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf8x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_i8mf8x4_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void
@llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_i8mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_i8mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_i8m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_i8m2_m(mask, 
base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_i8m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_i16mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_i16mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_i16m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_i16m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_i32mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_i32m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_i32m2x4_m(mask, base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_i64m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg4ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_i64m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_u8mf8x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } 
[[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_u8mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_u8mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_u8m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_u8m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg4ei8_v_u16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_u16mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void 
test_vsoxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_u16mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_u16m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_u16m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_u32mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_u32m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8_v_u32m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_u64m1x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_u64m2x4_m(mask, base, bindex, v_tuple, vl);
}
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei16.c @@ -7,523 +7,1043 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_f16mf4x5(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_f16mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , 
, } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_f16mf2x5(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_f16mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_f16m1x5(_Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_f16m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_f32mf2x5(float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_f32mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.nxv2f32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_f32m1x5(float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_f32m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_f64m1x5(double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_f64m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_i8mf8x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_i8mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_i8mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } 
[[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_i8m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_i16mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_i16mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_i16m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32mf2 
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_i32mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// 
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_i32m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_i64m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u8mf8x5(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_u8mf8x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_u8mf4x5(base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_u8mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_u8m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_u16mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_u16mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { 
+ return __riscv_vsoxseg5ei16_v_u16m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_u32mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_u32m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_u64m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_f16mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f16mf2_m(vbool32_t 
mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_f16mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_f16m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 
0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_f32mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_f32m1x5_m(vbool32_t mask, float *base, 
vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_f32m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_f64m1x5_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_f64m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_i8mf8x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_i8mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_i8mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } 
[[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_i8m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_i16mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_i16mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg5ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_i16m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_i32mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_i32m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, 
vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_i64m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_u8mf8x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_u8mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_u8mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_u8m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } 
[[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_u16mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_u16mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_u16m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsoxseg5ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_u32mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_u32m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , 
, } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16_v_u64m1x5_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei32.c @@ -7,523 +7,1043 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]],
[[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f16mf4x5(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_f16mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f16mf2x5(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_f16mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f16m1x5(_Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_f16m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f32mf2x5(float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_f32mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f32m1x5(float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_f32m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { 
, , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f64m1x5(double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_f64m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_i8mf8x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_i8mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i8mf2x5(int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_i8mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg5ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_i8m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// 
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_i16mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_i16mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i16m1x5(int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_i16m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_i32mf2x5(base, 
bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_i32m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_i64m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_u8mf8x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_u8mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return 
__riscv_vsoxseg5ei32_v_u8mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_u8m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_u16mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_u16mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_u16m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void 
test_vsoxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_u32mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_u32m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } 
[[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_u64m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_f16mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf2x5_m +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_f16mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_f16m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_f32mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f32m1x5_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_f32m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, 
vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f64m1x5_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_f64m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_i8mf8x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_i8mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_i8mf2x5_m(mask, base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_i8m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_i16mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_i16mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16m1x5_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_i16m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_i32mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_i32m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_i64m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t 
v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_u8mf8x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_u8mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_u8mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_u8m1x5_m(mask, base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_u16mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } 
[[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_u16mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_u16m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg5ei32_v_u32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32_v_u32mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } 
[[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32> [[TMP9]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei32_v_u32m1x5_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u64m1_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u64m1x5_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei32_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei32_v_u64m1x5_m(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei64.c
@@ -7,523 +7,1043 @@
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], <vscale x 1 x half> [[V3]], <vscale x 1 x half> [[V4]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], <vscale x 1 x half> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[TMP5]], <vscale x 1 x half> [[TMP6]], <vscale x 1 x half> [[TMP7]], <vscale x 1 x half> [[TMP8]], <vscale x 1 x half> [[TMP9]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_f16mf4x5(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_f16mf4x5(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE3:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0]], <vscale x 2 x half> [[V1]], <vscale x 2 x half> [[V2]], <vscale x 2 x half> [[V3]], <vscale x 2 x half> [[V4]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], <vscale x 2 x half> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], <vscale x 2 x half> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_f16mf2x5(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_f16mf2x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_f16m1x5(_Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_f16m1x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_f32mf2x5(float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_f32mf2x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_f32m1x5(float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_f32m1x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f64m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_f64m1x5(double *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_f64m1x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf8x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_i8mf8x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_i8mf4x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_i8mf2x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_i8m1x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_i16mf4x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_i16mf2x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_i16m1x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_i32mf2x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_i32m1x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i64m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_i64m1x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf8x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_u8mf8x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_u8mf4x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_u8mf2x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_u8m1x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_u16mf4x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_u16mf2x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_u16m1x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_u32mf2x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_u32m1x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u64m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_u64m1x5(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf4x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_f16mf4x5_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_f16mf2x5_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_f16m1x5_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_f32mf2x5_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_f32m1x5_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_f32m1x5_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f64m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_f64m1x5_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_f64m1x5_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf8x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_i8mf8x5_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf4x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_i8mf4x5_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64_v_i8mf2x5_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_i8m1_m(vbool8_t mask, int8_t
*base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64_v_i8m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64_v_i16mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64_v_i16mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64_v_i16m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64_v_i32mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = 
extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64_v_i32m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64_v_i64m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64_v_u8mf8x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg5ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64_v_u8mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64_v_u8mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64_v_u8m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, 
vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64_v_u16mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64_v_u16mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64_v_u16m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64_v_u32mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64_v_u32m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64_v_u64m1x5_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg5ei8.c @@ -7,523 +7,1043 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f16mf4x5(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_f16mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]],
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f16mf2x5(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_f16mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f16m1x5(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_f16m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f32mf2x5(float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_f32mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f32m1x5(float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_f32m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f64m1x5(double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_f64m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf8 -// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_i8mf8x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = 
extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_i8mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_i8mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_i8m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_i16mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf2 -// CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_i16mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { 
, , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_i16m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_i32mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i32m1x5(int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_i32m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_i64m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf8 -// CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_u8mf8x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , 
} [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_u8mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u8mf2x5(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_u8mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_u8m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_u16mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf2 -// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_u16mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: 
[[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_u16m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_u32mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_u32m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_u64m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg5ei8_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_f16mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_f16mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_f16m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32mf2x5_m 
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_f32mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f32m1x5_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_f32m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f64m1x5_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_f64m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_i8mf8x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, 
vint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_i8mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_i8mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_i8m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_i16mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf2_m -// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_i16mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_i16m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_i32mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_i32m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_i64m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_u8mf8x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_u8mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void 
test_vsoxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_u8mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_u8m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_u16mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_u16mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_u16m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_u32mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_u32m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8_v_u64m1x5_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei16.c @@ -7,523 +7,1147 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +//
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_f16mf4x6(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16_v_f16mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_f16mf2x6(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16_v_f16mf2x6(base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_f16m1x6(_Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16_v_f16m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_f32mf2x6(float *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16_v_f32mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void 
test_vsoxseg6ei16_v_f32m1x6(float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16_v_f32m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_f64m1x6(double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16_v_f64m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i8mf8x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf4x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i8mf4x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i8mf2x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i8m1x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf4x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i16mf4x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i16mf2x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i16m1x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i32mf2x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i32m1x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i64m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i64m1x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf8x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u8mf8x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf4x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u8mf4x6(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u8mf4x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u8mf2x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u8m1x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf4x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u16mf4x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u16mf2x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u16m1x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u32mf2x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u32m1x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u64m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_u64m1x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_f16mf4x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_f16mf2x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_f16m1x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_f32mf2x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_f32m1x6_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_f32m1x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f64m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_f64m1x6_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_f64m1x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf8x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i8mf8x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i8mf4x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i8mf2x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei16_v_i8m1x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64
[[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16_v_i16mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16_v_i16mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16_v_i16m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16_v_i32mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16_v_i32m1x6_m(mask, base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16_v_i64m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vsoxseg6ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16_v_u8mf8x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg6ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16_v_u8mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16_v_u8mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], 
[[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16_v_u8m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , 
, } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16_v_u16mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16_v_u16mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg6ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16_v_u16m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16_v_u32mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg6ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16_v_u32m1x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u64m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16_v_u64m1x6_m(mask, base, bindex, v_tuple, vl);
 }
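The hunks above and below exercise the same API change: each non-overloaded vsoxseg intrinsic now takes its segment fields as a single tuple-typed argument (for example vint8mf8x6_t) instead of six separate vector arguments, and the regenerated checks pin down the insertvalue/extractvalue chain that reassembles the tuple in IR. A minimal caller-side sketch of the migration, assuming a compiler that carries this patch: the vsoxseg6ei16 tuple intrinsic and the tuple type below appear verbatim in the hunks above, while __riscv_vset_v_i8mf8_i8mf8x6 is an assumed spelling of the tuple-insert helper and is illustrative only.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Old form (removed by this patch): six loose vector operands.
//   __riscv_vsoxseg6ei16_v_i8mf8_m(mask, base, bindex, f0, f1, f2, f3, f4, f5, vl);
// New form: pack the six fields into a vint8mf8x6_t and pass it whole.
void store_six_fields(vbool64_t mask, int8_t *base, vuint16mf4_t bindex,
                      vint8mf8_t f0, vint8mf8_t f1, vint8mf8_t f2,
                      vint8mf8_t f3, vint8mf8_t f4, vint8mf8_t f5, size_t vl) {
  vint8mf8x6_t v_tuple;                                    // sizeless tuple; every field is set below
  v_tuple = __riscv_vset_v_i8mf8_i8mf8x6(v_tuple, 0, f0);  // assumed tuple-insert spelling
  v_tuple = __riscv_vset_v_i8mf8_i8mf8x6(v_tuple, 1, f1);
  v_tuple = __riscv_vset_v_i8mf8_i8mf8x6(v_tuple, 2, f2);
  v_tuple = __riscv_vset_v_i8mf8_i8mf8x6(v_tuple, 3, f3);
  v_tuple = __riscv_vset_v_i8mf8_i8mf8x6(v_tuple, 4, f4);
  v_tuple = __riscv_vset_v_i8mf8_i8mf8x6(v_tuple, 5, f5);
  // Masked tuple store, exactly as tested in the hunks above.
  __riscv_vsoxseg6ei16_v_i8mf8x6_m(mask, base, bindex, v_tuple, vl);
}

In practice the tuple would more often come straight from a matching segment load, so the vset chain here only makes the packing explicit.

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei32.c
@@ -7,523 +7,1147 @@
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf4
-// CHECK-RV64-SAME: (ptr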
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_f16mf4x6(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_f16mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } 
[[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_f16mf2x6(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_f16mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_f16m1x6(_Float16 *base, vuint32m2_t bindex, 
vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_f16m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_f32mf2x6(float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_f32mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_f32m1x6(float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_f32m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t 
vl) { - return __riscv_vsoxseg6ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_f64m1x6(double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_f64m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_i8mf8x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_i8mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8mf2(int8_t *base, vuint32m2_t 
bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_i8mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_i8m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i16mf4x6(int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_i16mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_i16mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_i16m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_i32mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_i32m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_i64m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], 
[[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_u8mf8x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: 
[[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_u8mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_u8mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_u8m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_u16mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_u16mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_u16m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue 
{ , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_u32mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_u32m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u64m1 -// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_u64m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_f16mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, 
vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_f16mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32_v_f16m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_f32mf2x6_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_f32m1x6_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_f32m1x6_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f64m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_f64m1x6_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_f64m1x6_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf8x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_i8mf8x6_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_i8mf4x6_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_i8mf2x6_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_i8m1x6_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_i16mf4x6_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_i16mf2x6_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_i16m1x6_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_i32mf2x6_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_i32m1x6_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i64m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_i64m1x6_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf8x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_u8mf8x6_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_u8mf4x6_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_u8mf2x6_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_u8m1x6_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_u16mf4x6_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_u16mf2x6_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_u16m1x6_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_u32mf2x6_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_u32m1x6_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u64m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_u64m1x6_m(mask, base, bindex, v_tuple, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei64.c
@@ -7,523 +7,1147 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf4x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_f16mf4x6(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64_v_f16mf4x6(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_f16mf2x6(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64_v_f16mf2x6(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_f16m1x6(_Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64_v_f16m1x6(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_f32mf2x6(float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64_v_f32mf2x6(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] =
insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_f64m1x6(double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_f64m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8mf8(int8_t 
*base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_i8mf8x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_i8mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_i8mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_i8m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_i16mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_i16mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_i16m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_i32mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_i32m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// 
CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_i64m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_u8mf8x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u8mf4x6(uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_u8mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = 
extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_u8mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_u8m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_u16mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , 
, , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_u16mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_u16m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32mf2 -// CHECK-RV64-SAME: 
(ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_u32mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], 
[[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_u32m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { + 
return __riscv_vsoxseg6ei64_v_u64m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_f16mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_f16mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], 
[[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_f16m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_f32mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_f32m1x6_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_f32m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , 
, , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_f64m1x6_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_f64m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { + return 
__riscv_vsoxseg6ei64_v_i8mf8x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_i8mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_i8mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_i8m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_i16mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_i16mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_i16m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_i32mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg6ei64_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_i32m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } 
[[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_i64m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u8mf8_m(vbool64_t 
mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_u8mf8x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_u8mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_u8mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: 
[[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_u8m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_u16mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_u16mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_u16m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t 
v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_u32mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_u32m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64_v_u64m1x6_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg6ei8.c @@ -7,523 +7,1147 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], 
[[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_f16mf4x6(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_f16mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_f16mf2x6(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t 
v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_f16mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_f16m1x6(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_f16m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_f32mf2x6(float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_f32mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return 
__riscv_vsoxseg6ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_f32m1x6(float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_f32m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_f64m1x6(double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_f64m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , 
} [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_i8mf8x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t 
v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_i8mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_i8mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_i8m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg6ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_i16mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_i16mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_i16m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], 
[[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_i32mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_i32m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_i64m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_u8mf8x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_u8mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], 
[[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_u8mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = 
extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_u8m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_u16mf4x6(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_u16mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_u16mf2x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_u16m1x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_u32mf2x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u32m1x6(uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_u32m1x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u64m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_u64m1x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_f16mf4x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_f16mf2x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_f16m1x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_f32mf2x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f32m1x6_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_f32m1x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f64m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f64m1x6_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_f64m1x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf8x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_i8mf8x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_i8mf4x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_i8mf2x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_i8m1x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_i16mf4x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_i16mf2x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_i16m1x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_i32mf2x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_i32m1x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i64m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_i64m1x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf8x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_u8mf8x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_u8mf4x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_u8mf2x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_u8m1x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_u16mf4x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_u16mf2x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei8_v_u16m1x6_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , }
[[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_u32mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void 
test_vsoxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_u32m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8_v_u64m1x6_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei16.c @@ -7,523 +7,1251 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f16mf4x7(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16_v_f16mf4x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f16mf2x7(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16_v_f16mf2x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f16m1x7(_Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16_v_f16m1x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f32mf2x7(float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16_v_f32mf2x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f32m1x7(float *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16_v_f32m1x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f64m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f64m1x7(double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16_v_f64m1x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf8x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16_v_i8mf8x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16_v_i8mf4x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i8mf2x7(int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16_v_i8mf2x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16_v_i8m1x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16_v_i16mf4x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16_v_i16mf2x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i16m1x7(int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16_v_i16m1x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16_v_i32mf2x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16_v_i32m1x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i64m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16_v_i64m1x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf8x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16_v_u8mf8x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16_v_u8mf4x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16_v_u8mf2x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16_v_u8m1x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16_v_u16mf4x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void
@llvm.riscv.vsoxseg7.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_u16mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_u16m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, 
vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_u32mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_u32m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], 
[[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_u64m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue 
{ , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_f16mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_f16mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_f16m1x7_m(mask, base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_f32mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_f32m1x7_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_f32m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_f64m1x7_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_f64m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], 
[[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_i8mf8x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_i8mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_i8mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , 
, , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_i8m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_i16mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, 
vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_i16mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_i16m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32mf2x7_m 
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_i32mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , 
, , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_i32m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// 
CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_i64m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, 
v6, vl); +void test_vsoxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_u8mf8x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_u8mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_u8mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue 
{ , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_u8m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , 
, , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_u16mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return 
__riscv_vsoxseg7ei16_v_u16mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_u16m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_u32mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], 
[[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16_v_u32m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( 
[[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16_v_u64m1x7_m(mask, base, bindex, v_tuple, vl);
 }

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei32.c
@@ -7,523 +7,1251 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t
vl) { - return __riscv_vsoxseg7ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f16mf4x7(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_f16mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f16mf2x7(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_f16mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f16m1x7(_Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_f16m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f32mf2x7(float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_f32mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg7ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f32m1x7(float *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_f32m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f64m1x7(double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_f64m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_i8mf8x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], 
[[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_i8mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], 
[[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i8mf2x7(int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_i8mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_i8m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf4x7 +// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_i16mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_i16mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.nxv4i16.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_i16m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i32mf2x7(int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_i32mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_i32m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i64m1x7(int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_i64m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: 
[[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_u8mf8x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u8mf4x7(uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_u8mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_u8mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u8m1x7(uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_u8m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// 
CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_u16mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u16mf2x7(uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_u16mf2x7(base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_u16m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } 
poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_u32mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// 
CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_u32m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u64m1x7(uint64_t *base, 
vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_u64m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_f16mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], 
[[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_f16mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } 
[[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_f16m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = 
extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_f32mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f32m1x7_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return 
__riscv_vsoxseg7ei32_v_f32m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f64m1x7_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_f64m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_i8mf8x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_i8mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], 
[[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_i8mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_i8m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_i16mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , 
, , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_i16mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { 
, , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_i16m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, 
vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_i32mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_i32m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i64m1x7_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_i64m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } 
[[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_u8mf8x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: 
[[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_u8mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void 
test_vsoxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_u8mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_u8m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], 
[[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_u16mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { 
, , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_u16mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: 
[[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_u16m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return 
__riscv_vsoxseg7ei32_v_u32mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_u32m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32_v_u64m1x7_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei64.c @@ -7,523 +7,1251 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } 
[[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_f16mf4x7(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_f16mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// 
CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_f16mf2x7(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_f16mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_f16m1x7(_Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + 
return __riscv_vsoxseg7ei64_v_f16m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_f32mf2x7(float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_f32mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_f32m1x7(float *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_f32m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// 
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_f64m1x7(double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_f64m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, 
v6, vl); +void test_vsoxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_i8mf8x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_i8mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_i8mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_i8m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { 
- return __riscv_vsoxseg7ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_i16mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_i16mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_i16m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], 
[[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_i32mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t 
v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_i32m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_i64m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_u8mf8x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], 
[[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_u8mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: 
ret void // -void test_vsoxseg7ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_u8mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_u8m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_u16mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , 
, , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_u16mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i64.i64( 
[[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u16m1x7(uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_u16m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_u32mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u64m1
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u64m1x7
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[V0]], ..., ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[TMP7]], ..., ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
-void test_vsoxseg7ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_u64m1x7(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf4_m
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf4x7_m
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0]], ..., ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[TMP7]], ..., ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
-void test_vsoxseg7ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_f16mf4x7_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf2_m
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf2x7_m
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0]], ..., ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[TMP7]], ..., ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
-void test_vsoxseg7ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_f16mf2x7_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16m1_m
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16m1x7_m
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0]], ..., ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[TMP7]], ..., ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
-void test_vsoxseg7ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_f16m1x7_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32mf2_m
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32mf2x7_m
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[V0]], ..., ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[TMP7]], ..., ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
-void test_vsoxseg7ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_f32mf2x7_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32m1_m
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32m1x7_m
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[V0]], ..., ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[TMP7]], ..., ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
-void test_vsoxseg7ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_f32m1x7_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_f32m1x7_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f64m1_m
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f64m1x7_m
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[V0]], ..., ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[TMP7]], ..., ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
-void test_vsoxseg7ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_f64m1x7_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_f64m1x7_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf8_m
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf8x7_m
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[V0]], ..., ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[TMP7]], ..., ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
-void test_vsoxseg7ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_i8mf8x7_m(mask, base, bindex, v_tuple, vl);
 }
 
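The masked variants below follow the same shape: the _m entry point takes the vboolN_t mask ahead of the pointer, and inactive lanes are left unstored. A short sketch mirroring test_vsoxseg7ei64_v_i8mf8x7_m above (hypothetical wrapper name; same toolchain assumption as the earlier sketch):

#include <riscv_vector.h>
#include <stddef.h>

// Only the lanes enabled in `mask` are scatter-stored; the tuple still
// supplies all seven segment values.
void store_i8mf8_seg7_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex,
                        vint8mf8x7_t v_tuple, size_t vl) {
  __riscv_vsoxseg7ei64_v_i8mf8x7_m(mask, base, bindex, v_tuple, vl);
}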
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf4_m
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf4x7_m
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[V0]], ..., ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[TMP7]], ..., ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
-void test_vsoxseg7ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_i8mf4x7_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf2_m
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf2x7_m
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[V0]], ..., ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[TMP7]], ..., ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
-void test_vsoxseg7ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_i8mf2x7_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8m1_m
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8m1x7_m
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[V0]], ..., ptr [[BASE]], <vscale x 8 x i64> [[BINDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[TMP7]], ..., ptr [[BASE]], <vscale x 8 x i64> [[BINDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
-void test_vsoxseg7ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_i8m1x7_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf4_m
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf4x7_m
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[V0]], ..., ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[TMP7]], ..., ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
-void test_vsoxseg7ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_i16mf4x7_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf2_m
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf2x7_m
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[V0]], ..., ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[TMP7]], ..., ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
-void test_vsoxseg7ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_i16mf2x7_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16m1_m
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16m1x7_m
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[V0]], ..., ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[TMP7]], ..., ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
-void test_vsoxseg7ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_i16m1x7_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32mf2_m
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32mf2x7_m
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[V0]], ..., ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[TMP7]], ..., ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
-void test_vsoxseg7ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_i32mf2x7_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32m1_m
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32m1x7_m
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[V0]], ..., ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[TMP7]], ..., ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
-void test_vsoxseg7ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_i32m1x7_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i64m1_m
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i64m1x7_m
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[V0]], ..., ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[TMP7]], ..., ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
-void test_vsoxseg7ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_i64m1x7_m(mask, base, bindex, v_tuple, vl);
 }
 
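In practice the tuple usually comes straight from a segment load, so the type change removes the per-register plumbing entirely. A sketch of that round trip, assuming the matching tuple-returning load __riscv_vloxseg7ei64_v_u64m1x7 from the same conversion series exists with the analogous signature (the gather_scatter_seg7 wrapper is hypothetical):

#include <riscv_vector.h>
#include <stddef.h>

// Gather seven-field records at src_idx, then scatter them to dst_idx.
void gather_scatter_seg7(uint64_t *dst, const uint64_t *src,
                         vuint64m1_t src_idx, vuint64m1_t dst_idx, size_t vl) {
  vuint64m1x7_t t = __riscv_vloxseg7ei64_v_u64m1x7(src, src_idx, vl);
  __riscv_vsoxseg7ei64_v_u64m1x7(dst, dst_idx, t, vl);
}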
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf8_m
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf8x7_m
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[V0]], ..., ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[TMP7]], ..., ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
-void test_vsoxseg7ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_u8mf8x7_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf4_m
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf4x7_m
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[V0]], ..., ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[TMP7]], ..., ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
-void test_vsoxseg7ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_u8mf4x7_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf2_m
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf2x7_m
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[V0]], ..., ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[TMP7]], ..., ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
-void test_vsoxseg7ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_u8mf2x7_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8m1_m
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8m1x7_m
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[V0]], ..., ptr [[BASE]], <vscale x 8 x i64> [[BINDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[TMP7]], ..., ptr [[BASE]], <vscale x 8 x i64> [[BINDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
-void test_vsoxseg7ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_u8m1x7_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf4_m
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf4x7_m
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[V0]], ..., ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[TMP7]], ..., ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
-void test_vsoxseg7ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_u16mf4x7_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf2_m
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf2x7_m
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[V0]], ..., ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[TMP7]], ..., ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
-void test_vsoxseg7ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64_v_u16mf2x7_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16m1_m
-// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], <vscale x 4 x i16> [[V5:%.*]], <vscale x 4 x i16> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16m1x7_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE2:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE3:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE4:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE5:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE6:%.*]], i64 noundef
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_u16m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64_v_u32mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32> [[TMP9]], <vscale x 2 x i32> [[TMP10]], <vscale x 2 x i32> [[TMP11]], <vscale x 2 x i32> [[TMP12]], <vscale x 2 x i32> [[TMP13]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_u32m1x7_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u64m1_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u64m1x7_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], <vscale x 1 x i64> [[V5]], <vscale x 1 x i64> [[V6]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], <vscale x 1 x i64> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_u64m1x7_m(mask, base, bindex, v_tuple, vl);
 }
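Every hunk in these autogenerated tests applies the same mechanical rewrite: the seven discrete segment operands v0 ... v6 collapse into a single tuple value, the type suffix gains an x7 element count (vuint64m1_t operands become one vuint64m1x7_t), and the generated IR now assembles and disassembles the tuple with insertvalue/extractvalue before the unchanged @llvm.riscv.vsoxseg7 call. A minimal caller-side sketch of the migration, using only intrinsic names and types exercised by the test above (store_u64_segments is a hypothetical wrapper, not part of the patch):

#include <riscv_vector.h>

// Old interface, removed by this patch: seven separate vector operands.
//   __riscv_vsoxseg7ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
// New interface: one vuint64m1x7_t tuple carries all seven segment fields.
void store_u64_segments(vbool64_t mask, uint64_t *base, vuint64m1_t bindex,
                        vuint64m1x7_t v_tuple, size_t vl) {
  __riscv_vsoxseg7ei64_v_u64m1x7_m(mask, base, bindex, v_tuple, vl);
}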
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei8.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg7ei8.c
@@ -7,523 +7,1251 @@
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], <vscale x 1 x half> [[V3]], <vscale x 1 x half> [[V4]], <vscale x 1 x half> [[V5]], <vscale x 1 x half> [[V6]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], <vscale x 1 x half> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], <vscale x 1 x half> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], <vscale x 1 x half> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[TMP7]], <vscale x 1 x half> [[TMP8]], <vscale x 1 x half> [[TMP9]], <vscale x 1 x half> [[TMP10]], <vscale x 1 x half> [[TMP11]], <vscale x 1 x half> [[TMP12]], <vscale x 1 x half> [[TMP13]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_f16mf4x7(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_f16mf4x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]],
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_f16mf2x7(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_f16mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , 
} [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_f16m1x7(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_f16m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i8.i64( [[TMP7]], 
[[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_f32mf2x7(float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_f32mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_f32m1x7(float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_f32m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_f64m1x7(double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_f64m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_i8mf8x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_i8mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_i8mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_i8m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_i16mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { 
, , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_i16mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_i16m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_i32mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } 
[[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_i32m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } 
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i64m1x7(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf8x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u8mf8x7(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u8mf4x7(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u8mf2x7(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u8m1x7(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u16mf4x7(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u16mf2x7(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u16m1x7(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u32mf2x7(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u32m1x7(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u64m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_u64m1x7(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf4x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_f16mf4x7_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_f16mf2x7_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_f16m1x7_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_f32mf2x7_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_f32m1x7_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_f32m1x7_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f64m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_f64m1x7_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_f64m1x7_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf8x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i8mf8x7_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf4x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i8mf4x7_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i8mf2x7_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i8m1x7_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf4x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_i16mf4x7_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_i16mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return 
__riscv_vsoxseg7ei8_v_i16m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_i32mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_i32m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_i64m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], 
[[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_u8mf8x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_u8mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_u8mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_u8m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// 
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_u16mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, 
vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_u16mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_u16m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32mf2x7_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_u32mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , 
, , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_u32m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: 
[[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8_v_u64m1x7_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei16.c @@ -7,523 +7,1355 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = 
extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_f16mf4x8(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_f16mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, 
vfloat16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_f16mf2x8(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_f16mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_f16m1x8(_Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_f16m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_f32mf2x8(float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_f32mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[TMP8]], <vscale x 2 x float> [[TMP9]], <vscale x 2 x float> [[TMP10]], <vscale x 2 x float> [[TMP11]], <vscale x 2 x float> [[TMP12]], <vscale x 2 x float> [[TMP13]], <vscale x 2 x float> [[TMP14]], <vscale x 2 x float> [[TMP15]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]])
 //
-void test_vsoxseg8ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_f32m1x8(float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16_v_f32m1x8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f64m1
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f64m1x8
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[V0]], <vscale x 1 x double> [[V1]], <vscale x 1 x double> [[V2]], <vscale x 1 x double> [[V3]], <vscale x 1 x double> [[V4]], <vscale x 1 x double> [[V5]], <vscale x 1 x double> [[V6]], <vscale x 1 x double> [[V7]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[TMP8]], <vscale x 1 x double> [[TMP9]], <vscale x 1 x double> [[TMP10]], <vscale x 1 x double> [[TMP11]], <vscale x 1 x double> [[TMP12]], <vscale x 1 x double> [[TMP13]], <vscale x 1 x double> [[TMP14]], <vscale x 1 x double> [[TMP15]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
 //
-void test_vsoxseg8ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_f64m1x8(double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16_v_f64m1x8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf8
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf8x8
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[V0]], <vscale x 1 x i8> [[V1]], <vscale x 1 x i8> [[V2]], <vscale x 1 x i8> [[V3]], <vscale x 1 x i8> [[V4]], <vscale x 1 x i8> [[V5]], <vscale x 1 x i8> [[V6]], <vscale x 1 x i8> [[V7]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], <vscale x 1 x i8> [[TMP14]], <vscale x 1 x i8> [[TMP15]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
 //
-void test_vsoxseg8ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16_v_i8mf8x8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf4
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf4x8
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[V0]], <vscale x 2 x i8> [[V1]], <vscale x 2 x i8> [[V2]], <vscale x 2 x i8> [[V3]], <vscale x 2 x i8> [[V4]], <vscale x 2 x i8> [[V5]], <vscale x 2 x i8> [[V6]], <vscale x 2 x i8> [[V7]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], <vscale x 2 x i8> [[TMP12]], <vscale x 2 x i8> [[TMP13]], <vscale x 2 x i8> [[TMP14]], <vscale x 2 x i8> [[TMP15]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]])
 //
-void test_vsoxseg8ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16_v_i8mf4x8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf2
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf2x8
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[V0]], <vscale x 4 x i8> [[V1]], <vscale x 4 x i8> [[V2]], <vscale x 4 x i8> [[V3]], <vscale x 4 x i8> [[V4]], <vscale x 4 x i8> [[V5]], <vscale x 4 x i8> [[V6]], <vscale x 4 x i8> [[V7]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], <vscale x 4 x i8> [[TMP12]], <vscale x 4 x i8> [[TMP13]], <vscale x 4 x i8> [[TMP14]], <vscale x 4 x i8> [[TMP15]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], i64 [[VL]])
 //
-void test_vsoxseg8ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16_v_i8mf2x8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8m1
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8m1x8
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[V0]], <vscale x 8 x i8> [[V1]], <vscale x 8 x i8> [[V2]], <vscale x 8 x i8> [[V3]], <vscale x 8 x i8> [[V4]], <vscale x 8 x i8> [[V5]], <vscale x 8 x i8> [[V6]], <vscale x 8 x i8> [[V7]], ptr [[BASE]], <vscale x 8 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], <vscale x 8 x i8> [[TMP12]], <vscale x 8 x i8> [[TMP13]], <vscale x 8 x i8> [[TMP14]], <vscale x 8 x i8> [[TMP15]], ptr [[BASE]], <vscale x 8 x i16> [[BINDEX]], i64 [[VL]])
 //
-void test_vsoxseg8ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16_v_i8m1x8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf4
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf4x8
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[V0]], <vscale x 1 x i16> [[V1]], <vscale x 1 x i16> [[V2]], <vscale x 1 x i16> [[V3]], <vscale x 1 x i16> [[V4]], <vscale x 1 x i16> [[V5]], <vscale x 1 x i16> [[V6]], <vscale x 1 x i16> [[V7]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16> [[TMP9]], <vscale x 1 x i16> [[TMP10]], <vscale x 1 x i16> [[TMP11]], <vscale x 1 x i16> [[TMP12]], <vscale x 1 x i16> [[TMP13]], <vscale x 1 x i16> [[TMP14]], <vscale x 1 x i16> [[TMP15]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
 //
-void test_vsoxseg8ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16_v_i16mf4x8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf2
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf2x8
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[V0]], <vscale x 2 x i16> [[V1]], <vscale x 2 x i16> [[V2]], <vscale x 2 x i16> [[V3]], <vscale x 2 x i16> [[V4]], <vscale x 2 x i16> [[V5]], <vscale x 2 x i16> [[V6]], <vscale x 2 x i16> [[V7]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16> [[TMP9]], <vscale x 2 x i16> [[TMP10]], <vscale x 2 x i16> [[TMP11]], <vscale x 2 x i16> [[TMP12]], <vscale x 2 x i16> [[TMP13]], <vscale x 2 x i16> [[TMP14]], <vscale x 2 x i16> [[TMP15]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]])
 //
-void test_vsoxseg8ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16_v_i16mf2x8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16m1
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16m1x8
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[V0]], <vscale x 4 x i16> [[V1]], <vscale x 4 x i16> [[V2]], <vscale x 4 x i16> [[V3]], <vscale x 4 x i16> [[V4]], <vscale x 4 x i16> [[V5]], <vscale x 4 x i16> [[V6]], <vscale x 4 x i16> [[V7]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16> [[TMP9]], <vscale x 4 x i16> [[TMP10]], <vscale x 4 x i16> [[TMP11]], <vscale x 4 x i16> [[TMP12]], <vscale x 4 x i16> [[TMP13]], <vscale x 4 x i16> [[TMP14]], <vscale x 4 x i16> [[TMP15]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], i64 [[VL]])
 //
-void test_vsoxseg8ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16_v_i16m1x8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32mf2
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32mf2x8
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[V0]], <vscale x 1 x i32> [[V1]], <vscale x 1 x i32> [[V2]], <vscale x 1 x i32> [[V3]], <vscale x 1 x i32> [[V4]], <vscale x 1 x i32> [[V5]], <vscale x 1 x i32> [[V6]], <vscale x 1 x i32> [[V7]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32> [[TMP9]], <vscale x 1 x i32> [[TMP10]], <vscale x 1 x i32> [[TMP11]], <vscale x 1 x i32> [[TMP12]], <vscale x 1 x i32> [[TMP13]], <vscale x 1 x i32> [[TMP14]], <vscale x 1 x i32> [[TMP15]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
 //
-void test_vsoxseg8ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16_v_i32mf2x8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32m1
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32m1x8
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[V0]], <vscale x 2 x i32> [[V1]], <vscale x 2 x i32> [[V2]], <vscale x 2 x i32> [[V3]], <vscale x 2 x i32> [[V4]], <vscale x 2 x i32> [[V5]], <vscale x 2 x i32> [[V6]], <vscale x 2 x i32> [[V7]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32> [[TMP9]], <vscale x 2 x i32> [[TMP10]], <vscale x 2 x i32> [[TMP11]], <vscale x 2 x i32> [[TMP12]], <vscale x 2 x i32> [[TMP13]], <vscale x 2 x i32> [[TMP14]], <vscale x 2 x i32> [[TMP15]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]])
 //
-void test_vsoxseg8ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16_v_i32m1x8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i64m1
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i64m1x8
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], <vscale x 1 x i64> [[V5]], <vscale x 1 x i64> [[V6]], <vscale x 1 x i64> [[V7]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
 //
-void test_vsoxseg8ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16_v_i64m1x8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf8
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf8x8
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[V0]], <vscale x 1 x i8> [[V1]], <vscale x 1 x i8> [[V2]], <vscale x 1 x i8> [[V3]], <vscale x 1 x i8> [[V4]], <vscale x 1 x i8> [[V5]], <vscale x 1 x i8> [[V6]], <vscale x 1 x i8> [[V7]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], <vscale x 1 x i8> [[TMP14]], <vscale x 1 x i8> [[TMP15]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
 //
-void test_vsoxseg8ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16_v_u8mf8x8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf4
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf4x8
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[V0]], <vscale x 2 x i8> [[V1]], <vscale x 2 x i8> [[V2]], <vscale x 2 x i8> [[V3]], <vscale x 2 x i8> [[V4]], <vscale x 2 x i8> [[V5]], <vscale x 2 x i8> [[V6]], <vscale x 2 x i8> [[V7]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], <vscale x 2 x i8> [[TMP12]], <vscale x 2 x i8> [[TMP13]], <vscale x 2 x i8> [[TMP14]], <vscale x 2 x i8> [[TMP15]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]])
 //
-void test_vsoxseg8ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16_v_u8mf4x8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf2
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf2x8
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[V0]], <vscale x 4 x i8> [[V1]], <vscale x 4 x i8> [[V2]], <vscale x 4 x i8> [[V3]], <vscale x 4 x i8> [[V4]], <vscale x 4 x i8> [[V5]], <vscale x 4 x i8> [[V6]], <vscale x 4 x i8> [[V7]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], <vscale x 4 x i8> [[TMP12]], <vscale x 4 x i8> [[TMP13]], <vscale x 4 x i8> [[TMP14]], <vscale x 4 x i8> [[TMP15]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], i64 [[VL]])
 //
-void test_vsoxseg8ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16_v_u8mf2x8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8m1
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8m1x8
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[V0]], <vscale x 8 x i8> [[V1]], <vscale x 8 x i8> [[V2]], <vscale x 8 x i8> [[V3]], <vscale x 8 x i8> [[V4]], <vscale x 8 x i8> [[V5]], <vscale x 8 x i8> [[V6]], <vscale x 8 x i8> [[V7]], ptr [[BASE]], <vscale x 8 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], <vscale x 8 x i8> [[TMP12]], <vscale x 8 x i8> [[TMP13]], <vscale x 8 x i8> [[TMP14]], <vscale x 8 x i8> [[TMP15]], ptr [[BASE]], <vscale x 8 x i16> [[BINDEX]], i64 [[VL]])
 //
-void test_vsoxseg8ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16_v_u8m1x8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf4
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf4x8
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[V0]], <vscale x 1 x i16> [[V1]], <vscale x 1 x i16> [[V2]], <vscale x 1 x i16> [[V3]], <vscale x 1 x i16> [[V4]], <vscale x 1 x i16> [[V5]], <vscale x 1 x i16> [[V6]], <vscale x 1 x i16> [[V7]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16> [[TMP9]], <vscale x 1 x i16> [[TMP10]], <vscale x 1 x i16> [[TMP11]], <vscale x 1 x i16> [[TMP12]], <vscale x 1 x i16> [[TMP13]], <vscale x 1 x i16> [[TMP14]], <vscale x 1 x i16> [[TMP15]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
 //
-void test_vsoxseg8ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16_v_u16mf4x8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf2
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf2x8
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[V0]], <vscale x 2 x i16> [[V1]], <vscale x 2 x i16> [[V2]], <vscale x 2 x i16> [[V3]], <vscale x 2 x i16> [[V4]], <vscale x 2 x i16> [[V5]], <vscale x 2 x i16> [[V6]], <vscale x 2 x i16> [[V7]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16> [[TMP9]], <vscale x 2 x i16> [[TMP10]], <vscale x 2 x i16> [[TMP11]], <vscale x 2 x i16> [[TMP12]], <vscale x 2 x i16> [[TMP13]], <vscale x 2 x i16> [[TMP14]], <vscale x 2 x i16> [[TMP15]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]])
 //
-void test_vsoxseg8ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16_v_u16mf2x8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16m1
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16m1x8
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[V0]], <vscale x 4 x i16> [[V1]], <vscale x 4 x i16> [[V2]], <vscale x 4 x i16> [[V3]], <vscale x 4 x i16> [[V4]], <vscale x 4 x i16> [[V5]], <vscale x 4 x i16> [[V6]], <vscale x 4 x i16> [[V7]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16> [[TMP9]], <vscale x 4 x i16> [[TMP10]], <vscale x 4 x i16> [[TMP11]], <vscale x 4 x i16> [[TMP12]], <vscale x 4 x i16> [[TMP13]], <vscale x 4 x i16> [[TMP14]], <vscale x 4 x i16> [[TMP15]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], i64 [[VL]])
 //
-void test_vsoxseg8ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16_v_u16m1x8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32mf2
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32mf2x8
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[V0]], <vscale x 1 x i32> [[V1]], <vscale x 1 x i32> [[V2]], <vscale x 1 x i32> [[V3]], <vscale x 1 x i32> [[V4]], <vscale x 1 x i32> [[V5]], <vscale x 1 x i32> [[V6]], <vscale x 1 x i32> [[V7]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32> [[TMP9]], <vscale x 1 x i32> [[TMP10]], <vscale x 1 x i32> [[TMP11]], <vscale x 1 x i32> [[TMP12]], <vscale x 1 x i32> [[TMP13]], <vscale x 1 x i32> [[TMP14]], <vscale x 1 x i32> [[TMP15]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
 //
-void test_vsoxseg8ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16_v_u32mf2x8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32m1
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32m1x8
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[V0]], <vscale x 2 x i32> [[V1]], <vscale x 2 x i32> [[V2]], <vscale x 2 x i32> [[V3]], <vscale x 2 x i32> [[V4]], <vscale x 2 x i32> [[V5]], <vscale x 2 x i32> [[V6]], <vscale x 2 x i32> [[V7]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32> [[TMP9]], <vscale x 2 x i32> [[TMP10]], <vscale x 2 x i32> [[TMP11]], <vscale x 2 x i32> [[TMP12]], <vscale x 2 x i32> [[TMP13]], <vscale x 2 x i32> [[TMP14]], <vscale x 2 x i32> [[TMP15]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]])
 //
-void test_vsoxseg8ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16_v_u32m1x8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u64m1
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u64m1x8
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], <vscale x 1 x i64> [[V5]], <vscale x 1 x i64> [[V6]], <vscale x 1 x i64> [[V7]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
 //
-void test_vsoxseg8ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16_v_u64m1x8(base, bindex, v_tuple, vl);
 }
__riscv_vsoxseg8ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_f16mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_f16mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_f16m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], 
[[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_f32mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_f32m1x8_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_f32m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_f64m1x8_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_f64m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: 
[[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_i8mf8x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( [[TMP8]], [[TMP9]], 
[[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_i8mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void 
test_vsoxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_i8mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_i8m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_i16mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_i16mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_i16m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], 
[[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_i32mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = 
extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_i32m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg8ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_i64m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { + return 
__riscv_vsoxseg8ei16_v_u8mf8x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_u8mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_u8mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_u8m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , 
, , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_u16mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: 
[[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_u16mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_u16m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, 
vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_u32mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16_v_u32m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local 
void @test_vsoxseg8ei16_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u64m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_u64m1x8_m(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei32.c
@@ -7,523 +7,1355 @@
 #include <riscv_vector.h>
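// Illustrative usage, hand-written rather than autogenerated: with the tuple
// types, a segment gather/scatter round-trip passes one tuple value instead
// of eight loose registers. The vloxseg8ei32 tuple load form is assumed to
// come from the same intrinsics revision as the stores tested below; only
// the vsoxseg8ei32 tuple store is confirmed by this patch.
//
// void example_permute_segments(int32_t *dst, const int32_t *src,
//                               vuint32m1_t bindex, size_t vl) {
//   // Load eight fields per element into a single 8-field tuple (assumed API).
//   vint32m1x8_t v_tuple = __riscv_vloxseg8ei32_v_i32m1x8(src, bindex, vl);
//   // Store them back through the index vector; compare the v0..v7 argument
//   // lists this replaces in the deleted test bodies below.
//   __riscv_vsoxseg8ei32_v_i32m1x8(dst, bindex, v_tuple, vl);
// }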
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return __riscv_vsoxseg8ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei32_v_f16mf4x8(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei32_v_f16mf4x8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]],
[[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_f16mf2x8(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_f16mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 
2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_f16m1x8(_Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_f16m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_f32mf2x8(float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_f32mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei32_v_f32m1x8(float *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei32_v_f32m1x8(base, bindex, v_tuple, vl);
 }
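// Hand-written comparison, not part of the generated checks: the deleted
// f32m1 entry point above took eight loose vfloat32m1_t registers, while its
// replacement takes a single vfloat32m1x8_t. Callers that already work in
// terms of segment tuples (for example, values produced by a tuple segment
// load) simply pass the tuple through; the wrapper name here is hypothetical.
//
// void example_scatter_f32m1x8(float *base, vuint32m1_t bindex,
//                              vfloat32m1x8_t v_tuple, size_t vl) {
//   // Equivalent to the old call
//   //   __riscv_vsoxseg8ei32_v_f32m1(base, bindex, v0, ..., v7, vl);
//   // with v0..v7 packed into v_tuple.
//   __riscv_vsoxseg8ei32_v_f32m1x8(base, bindex, v_tuple, vl);
// }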
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f64m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei32_v_f64m1x8(double *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei32_v_f64m1x8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf8x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return __riscv_vsoxseg8ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei32_v_i8mf8x8(int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei32_v_i8mf8x8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]],
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_i8mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , 
, , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_i8mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , 
, } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_i8m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// 
CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i16mf4x8(int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_i16mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_i16mf2(base, bindex, v0, 
v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei32_v_i16mf2x8(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], <vscale x 4 x i16> [[V5:%.*]], <vscale x 4 x i16> [[V6:%.*]], <vscale x 4 x i16> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE2:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE3:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE4:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE5:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE6:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[V0]], <vscale x 4 x i16> [[V1]], <vscale x 4 x i16> [[V2]], <vscale x 4 x i16> [[V3]], <vscale x 4 x i16> [[V4]], <vscale x 4 x i16> [[V5]], <vscale x 4 x i16> [[V6]], <vscale x 4 x i16> [[V7]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], <vscale x 4 x i16> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], <vscale x 4 x i16> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]], <vscale x 4 x i16> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], <vscale x 4 x i16> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP6]], <vscale x 4 x i16> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16> [[TMP9]], <vscale x 4 x i16> [[TMP10]], <vscale x 4 x i16> [[TMP11]], <vscale x 4 x i16> [[TMP12]], <vscale x 4 x i16> [[TMP13]], <vscale x 4 x i16> [[TMP14]], <vscale x 4 x i16> [[TMP15]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei32_v_i16m1x8(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], <vscale x 1 x i32> [[V5:%.*]], <vscale x 1 x i32> [[V6:%.*]], <vscale x 1 x i32> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE6:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[V0]], <vscale x 1 x i32> [[V1]], <vscale x 1 x i32> [[V2]], <vscale x 1 x i32> [[V3]], <vscale x 1 x i32> [[V4]], <vscale x 1 x i32> [[V5]], <vscale x 1 x i32> [[V6]], <vscale x 1 x i32> [[V7]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], <vscale x 1 x i32> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], <vscale x 1 x i32> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], <vscale x 1 x i32> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP4]], <vscale x 1 x i32> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP5]], <vscale x 1 x i32> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], <vscale x 1 x i32> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32> [[TMP9]], <vscale x 1 x i32> [[TMP10]], <vscale x 1 x i32> [[TMP11]], <vscale x 1 x i32> [[TMP12]], <vscale x 1 x i32> [[TMP13]], <vscale x 1 x i32> [[TMP14]], <vscale x 1 x i32> [[TMP15]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei32_v_i32mf2x8(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], <vscale x 2 x i32> [[V6:%.*]], <vscale x 2 x i32> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE3:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE4:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE5:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE6:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[V0]], <vscale x 2 x i32> [[V1]], <vscale x 2 x i32> [[V2]], <vscale x 2 x i32> [[V3]], <vscale x 2 x i32> [[V4]], <vscale x 2 x i32> [[V5]], <vscale x 2 x i32> [[V6]], <vscale x 2 x i32> [[V7]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], <vscale x 2 x i32> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], <vscale x 2 x i32> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], <vscale x 2 x i32> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]], <vscale x 2 x i32> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP5]], <vscale x 2 x i32> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], <vscale x 2 x i32> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32> [[TMP9]], <vscale x 2 x i32> [[TMP10]], <vscale x 2 x i32> [[TMP11]], <vscale x 2 x i32> [[TMP12]], <vscale x 2 x i32> [[TMP13]], <vscale x 2 x i32> [[TMP14]], <vscale x 2 x i32> [[TMP15]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei32_v_i32m1x8(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], <vscale x 1 x i64> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i64m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE6:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], <vscale x 1 x i64> [[V5]], <vscale x 1 x i64> [[V6]], <vscale x 1 x i64> [[V7]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], <vscale x 1 x i64> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], <vscale x 1 x i64> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei32_v_i64m1x8(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], <vscale x 1 x i8> [[V6:%.*]], <vscale x 1 x i8> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf8x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE6:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[V0]], <vscale x 1 x i8> [[V1]], <vscale x 1 x i8> [[V2]], <vscale x 1 x i8> [[V3]], <vscale x 1 x i8> [[V4]], <vscale x 1 x i8> [[V5]], <vscale x 1 x i8> [[V6]], <vscale x 1 x i8> [[V7]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], <vscale x 1 x i8> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], <vscale x 1 x i8> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], <vscale x 1 x i8> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], <vscale x 1 x i8> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], <vscale x 1 x i8> [[TMP14]], <vscale x 1 x i8> [[TMP15]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei32_v_u8mf8x8(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], <vscale x 2 x i8> [[V6:%.*]], <vscale x 2 x i8> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE3:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE4:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE5:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE6:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[V0]], <vscale x 2 x i8> [[V1]], <vscale x 2 x i8> [[V2]], <vscale x 2 x i8> [[V3]], <vscale x 2 x i8> [[V4]], <vscale x 2 x i8> [[V5]], <vscale x 2 x i8> [[V6]], <vscale x 2 x i8> [[V7]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], <vscale x 2 x i8> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP4]], <vscale x 2 x i8> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], <vscale x 2 x i8> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]], <vscale x 2 x i8> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], <vscale x 2 x i8> [[TMP12]], <vscale x 2 x i8> [[TMP13]], <vscale x 2 x i8> [[TMP14]], <vscale x 2 x i8> [[TMP15]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei32_v_u8mf4x8(uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei32_v_u8mf4x8(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], <vscale x 4 x i8> [[V5:%.*]], <vscale x 4 x i8> [[V6:%.*]], <vscale x 4 x i8> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE3:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE4:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE5:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE6:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[V0]], <vscale x 4 x i8> [[V1]], <vscale x 4 x i8> [[V2]], <vscale x 4 x i8> [[V3]], <vscale x 4 x i8> [[V4]], <vscale x 4 x i8> [[V5]], <vscale x 4 x i8> [[V6]], <vscale x 4 x i8> [[V7]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], <vscale x 4 x i8> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP4]], <vscale x 4 x i8> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], <vscale x 4 x i8> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]], <vscale x 4 x i8> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], <vscale x 4 x i8> [[TMP12]], <vscale x 4 x i8> [[TMP13]], <vscale x 4 x i8> [[TMP14]], <vscale x 4 x i8> [[TMP15]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei32_v_u8mf2x8(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], <vscale x 8 x i8> [[V6:%.*]], <vscale x 8 x i8> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local
void @test_vsoxseg8ei32_v_u8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_u8m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , 
, , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_u16mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u16mf2x8(uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_u16mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_u16m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, 
vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_u32mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_u32m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_u64m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_f16mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_f16mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_f16m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , 
, , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_f32mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_f32m1x8_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_f32m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_f64m1x8_m(vbool64_t mask, double 
*base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_f64m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_i8mf8x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg8ei32_v_i8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_i8mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], 
[[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_i8mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 
3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_i8m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// 
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_i16mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , 
, , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_i16mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, 
vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_i16m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_i32mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg8ei32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_i32m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_i64m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 
0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_u8mf8x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } 
[[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_u8mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 
2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_u8mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_u8m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return 
__riscv_vsoxseg8ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_u16mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_u16mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_u16m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], 
[[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_u32mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } 
[[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_u32m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] 
= insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32_v_u64m1x8_m(mask, base, bindex, v_tuple, vl); }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei64.c
@@ -7,523 +7,1355 @@
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]]
[[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], <vscale x 1 x half> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[TMP8]], <vscale x 1 x half> [[TMP9]], <vscale x 1 x half> [[TMP10]], <vscale x 1 x half> [[TMP11]], <vscale x 1 x half> [[TMP12]], <vscale x 1 x half> [[TMP13]], <vscale x 1 x half> [[TMP14]], <vscale x 1 x half> [[TMP15]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_f16mf4x8(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_f16mf4x8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE3:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE4:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE5:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE6:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0]], <vscale x 2 x half> [[V1]], <vscale x 2 x half> [[V2]], <vscale x 2 x half> [[V3]], <vscale x 2 x half> [[V4]], <vscale x 2 x half> [[V5]], <vscale x 2 x half> [[V6]], <vscale x 2 x half> [[V7]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], <vscale x 2 x half> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], <vscale x 2 x half> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]], <vscale x 2 x half> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP5]], <vscale x 2 x half> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP6]], <vscale x 2 x half> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[TMP8]], <vscale x 2 x half> [[TMP9]], <vscale x 2 x half> [[TMP10]], <vscale x 2 x half> [[TMP11]], <vscale x 2 x half> [[TMP12]], <vscale x 2 x half> [[TMP13]], <vscale x 2 x half> [[TMP14]], <vscale x 2 x half> [[TMP15]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_f16mf2x8(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_f16mf2x8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE3:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE4:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE5:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE6:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0]], <vscale x 4 x half> [[V1]], <vscale x 4 x half> [[V2]], <vscale x 4 x half> [[V3]], <vscale x 4 x half> [[V4]], <vscale x 4 x half> [[V5]], <vscale x 4 x half> [[V6]], <vscale x 4 x half> [[V7]], ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], <vscale x 4 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], <vscale x 4 x half> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP3]], <vscale x 4 x half> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP4]], <vscale x 4 x half> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP5]], <vscale x 4 x half> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP6]], <vscale x 4 x half> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[TMP8]], <vscale x 4 x half> [[TMP9]], <vscale x 4 x half> [[TMP10]], <vscale x 4 x half> [[TMP11]], <vscale x 4 x half> [[TMP12]], <vscale x 4 x half> [[TMP13]], <vscale x 4 x half> [[TMP14]], <vscale x 4 x half> [[TMP15]], ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_f16m1x8(_Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_f16m1x8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], <vscale x 1 x float> [[V5:%.*]], <vscale x 1 x float> [[V6:%.*]], <vscale x 1 x float> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE6:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[V0]], <vscale x 1 x float> [[V1]], <vscale x 1 x float> [[V2]], <vscale x 1 x float> [[V3]], <vscale x 1 x float> [[V4]], <vscale x 1 x float> [[V5]], <vscale x 1 x float> [[V6]], <vscale x 1 x float> [[V7]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], <vscale x 1 x float> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP3]], <vscale x 1 x float> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP4]], <vscale x 1 x float> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP5]], <vscale x 1 x float> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP6]], <vscale x 1 x float> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[TMP8]], <vscale x 1 x float> [[TMP9]], <vscale x 1 x float> [[TMP10]], <vscale x 1 x float> [[TMP11]], <vscale x 1 x float> [[TMP12]], <vscale x 1 x float> [[TMP13]], <vscale x 1 x float> [[TMP14]], <vscale x 1 x float> [[TMP15]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_f32mf2x8(float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_f32mf2x8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], <vscale x 2 x float> [[V5:%.*]], <vscale x 2 x float> [[V6:%.*]], <vscale x 2 x float> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE3:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE4:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE5:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE6:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[V0]], <vscale x 2 x float> [[V1]], <vscale x 2 x float> [[V2]], <vscale x 2 x float> [[V3]], <vscale x 2 x float> [[V4]], <vscale x 2 x float> [[V5]], <vscale x 2 x float> [[V6]], <vscale x 2 x float> [[V7]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], <vscale x 2 x float> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP2]], <vscale x 2 x float> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP3]], <vscale x 2 x float> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP4]], <vscale x 2 x float> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP5]], <vscale x 2 x float> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP6]], <vscale x 2 x float> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[TMP8]], <vscale x 2 x float> [[TMP9]], <vscale x 2 x float> [[TMP10]], <vscale x 2 x float> [[TMP11]], <vscale x 2 x float> [[TMP12]], <vscale x 2 x float> [[TMP13]], <vscale x 2 x float> [[TMP14]], <vscale x 2 x float> [[TMP15]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_f32m1x8(float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_f32m1x8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x double> [[V0:%.*]], <vscale x 1 x double> [[V1:%.*]], <vscale x 1 x double> [[V2:%.*]], <vscale x 1 x double> [[V3:%.*]], <vscale x 1 x double> [[V4:%.*]], <vscale x 1 x double> [[V5:%.*]], <vscale x 1 x double> [[V6:%.*]], <vscale x 1 x double> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f64m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE6:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[V0]], <vscale x 1 x double> [[V1]], <vscale x 1 x double> [[V2]], <vscale x 1 x double> [[V3]], <vscale x 1 x double> [[V4]], <vscale x 1 x double> [[V5]], <vscale x 1 x double> [[V6]], <vscale x 1 x double> [[V7]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], <vscale x 1 x double> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], <vscale x 1 x double> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP3]], <vscale x 1 x double> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP4]], <vscale x 1 x double> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP5]], <vscale x 1 x double> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP6]], <vscale x 1 x double> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[TMP8]], <vscale x 1 x double> [[TMP9]], <vscale x 1 x double> [[TMP10]], <vscale x 1 x double> [[TMP11]], <vscale x 1 x double> [[TMP12]], <vscale x 1 x double> [[TMP13]], <vscale x 1 x double> [[TMP14]], <vscale x 1 x double> [[TMP15]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_f64m1x8(double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_f64m1x8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], <vscale x 1 x i8> [[V6:%.*]], <vscale x 1 x i8> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf8x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE6:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[V0]], <vscale x 1 x i8> [[V1]], <vscale x 1 x i8> [[V2]], <vscale x 1 x i8> [[V3]], <vscale x 1 x i8> [[V4]], <vscale x 1 x i8> [[V5]], <vscale x 1 x i8> [[V6]], <vscale x 1 x i8> [[V7]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], <vscale x 1 x i8> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], <vscale x 1 x i8> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], <vscale x 1 x i8> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], <vscale x 1 x i8> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], <vscale x 1 x i8> [[TMP14]], <vscale x 1 x i8> [[TMP15]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_i8mf8x8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], <vscale x 2 x i8> [[V6:%.*]], <vscale x 2 x i8> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE3:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE4:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE5:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE6:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[V0]], <vscale x 2 x i8> [[V1]], <vscale x 2 x i8> [[V2]], <vscale x 2 x i8> [[V3]], <vscale x 2 x i8> [[V4]], <vscale x 2 x i8> [[V5]], <vscale x 2 x i8> [[V6]], <vscale x 2 x i8> [[V7]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], <vscale x 2 x i8> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP4]], <vscale x 2 x i8> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], <vscale x 2 x i8> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]], <vscale x 2 x i8> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], <vscale x 2 x i8> [[TMP12]], <vscale x 2 x i8> [[TMP13]], <vscale x 2 x i8> [[TMP14]], <vscale x 2 x i8> [[TMP15]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_i8mf4x8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], <vscale x 4 x i8> [[V5:%.*]], <vscale x 4 x i8> [[V6:%.*]], <vscale x 4 x i8> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE3:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE4:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE5:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE6:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[V0]], <vscale x 4 x i8> [[V1]], <vscale x 4 x i8> [[V2]], <vscale x 4 x i8> [[V3]], <vscale x 4 x i8> [[V4]], <vscale x 4 x i8> [[V5]], <vscale x 4 x i8> [[V6]], <vscale x 4 x i8> [[V7]], ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], <vscale x 4 x i8> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP4]], <vscale x 4 x i8> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], <vscale x 4 x i8> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]], <vscale x 4 x i8> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], <vscale x 4 x i8> [[TMP12]], <vscale x 4 x i8> [[TMP13]], <vscale x 4 x i8> [[TMP14]], <vscale x 4 x i8> [[TMP15]], ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void
test_vsoxseg8ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_i8mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_i8m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_i16mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_i16mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_i16m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i32mf2x8(int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_i32mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_i32m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_i64m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg8ei64_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_u8mf8x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_u8mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , 
, } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_u8mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_u8m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i64.i64( [[TMP8]], [[TMP9]], 
[[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_u16mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return 
__riscv_vsoxseg8ei64_v_u16mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_u16m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_u32mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_u32m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_u64m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , 
, , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_f16mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, 
vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_f16mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_f16m1x8_m(mask, base, bindex, v_tuple, 
vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_f32mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_f32m1x8_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_f32m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_f64m1x8_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_f64m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } 
[[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_i8mf8x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_i8mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , 
} [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_i8mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return 
__riscv_vsoxseg8ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_i8m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_i16mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_i16mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], 
[[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64_v_i16m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 
1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_i32mf2x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_i32m1x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i64m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_i64m1x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf8x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_u8mf8x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf4x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_u8mf4x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_u8mf2x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_u8m1x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf4x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_u16mf4x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_u16mf2x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_u16m1x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_u32mf2x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_u32m1x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u64m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_u64m1x8_m(mask, base, bindex, v_tuple, vl);
 }
v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsoxseg8ei8.c @@ -7,523 +7,1355 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_f16mf4x8(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_f16mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf2 -// CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_f16mf2x8(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_f16mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_f16m1x8(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_f16m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_f32mf2x8(float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_f32mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// 
CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_f32m1x8(float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_f32m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], 
[[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_f64m1x8(double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_f64m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_i8mf8x8(base, bindex, v_tuple, vl); } 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_i8mf4x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_i8mf2x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_i8m1x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_i16mf4x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_i16mf2x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_i16m1x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_i32mf2x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_i32m1x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i64m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_i64m1x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf8x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_u8mf8x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_u8mf4x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_u8mf2x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_u8m1x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_u16mf4x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_u16mf2x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_u16m1x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_u32mf2x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_u32m1x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u64m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_u64m1x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf4x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_f16mf4x8_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_f16mf2x8_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_f16m1x8_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_f32mf2x8_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_f32m1x8_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_f32m1x8_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f64m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t
v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_f64m1x8_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_f64m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_i8mf8x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf4_m -// CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_i8mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], 
[[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_i8mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , 
} [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], <vscale x 8 x i8> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP4]], <vscale x 8 x i8> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP5]], <vscale x 8 x i8> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP6]], <vscale x 8 x i8> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], <vscale x 8 x i8> [[TMP12]], <vscale x 8 x i8> [[TMP13]], <vscale x 8 x i8> [[TMP14]], <vscale x 8 x i8> [[TMP15]], ptr [[BASE]], <vscale x 8 x i8> [[BINDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_i8m1x8_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf4_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], <vscale x 1 x i16> [[V4:%.*]], <vscale x 1 x i16> [[V5:%.*]], <vscale x 1 x i16> [[V6:%.*]], <vscale x 1 x i16> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf4x8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE6:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[V0]], <vscale x 1 x i16> [[V1]], <vscale x 1 x i16> [[V2]], <vscale x 1 x i16> [[V3]], <vscale x 1 x i16> [[V4]], <vscale x 1 x i16> [[V5]], <vscale x 1 x i16> [[V6]], <vscale x 1 x i16> [[V7]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], <vscale x 1 x i16> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], <vscale x 1 x i16> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]], <vscale x 1 x i16> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , }
[[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_i16mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// 
CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_i16mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], 
[[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_i16m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, 
vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_i32mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_i32m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg8ei8_v_i64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_i64m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], 
[[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_u8mf8x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], <vscale x 2 x i8> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP4]], <vscale x 2 x i8> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], <vscale x 2 x i8> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]], <vscale x 2 x i8> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], <vscale x 2 x i8> [[TMP12]], <vscale x 2 x i8> [[TMP13]], <vscale x 2 x i8> [[TMP14]], <vscale x 2 x i8> [[TMP15]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_u8mf4x8_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf2_m
-// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], <vscale x 4 x i8> [[V5:%.*]], <vscale x 4 x i8> [[V6:%.*]], <vscale x 4 x i8> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf2x8_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE3:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE4:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE5:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE6:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[V0]], <vscale x 4 x i8> [[V1]], <vscale x 4 x i8> [[V2]], <vscale x 4 x i8> [[V3]], <vscale x 4 x i8> [[V4]], <vscale x 4 x i8> [[V5]], <vscale x 4 x i8> [[V6]], <vscale x 4 x i8> [[V7]], ptr [[BASE]], <vscale x 4 x i8> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], <vscale x 4 x i8> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP4]], <vscale x 4 x i8> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], <vscale x 4 x i8> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP6]], <vscale x 4 x i8> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_u8mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 
6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_u8m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, 
vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_u16mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_u16mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16m1_m -// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_u16m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_u32mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_u32m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], 
[[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8_v_u64m1x8_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei16.c @@ -7,963 +7,1347 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_f16mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_f16mf4x2(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_f16mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg2ei16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_f16mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_f16mf2x2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_f16mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_f16m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_f16m1x2(_Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_f16m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_f16m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_f16m2x2(_Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_f16m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_f16m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_f16m4x2(_Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_f16m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_f32mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_f32mf2x2(float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_f32mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_f32m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_f32m1x2(float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_f32m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_f32m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_f32m2x2(float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_f32m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_f32m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_f32m4x2(float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_f32m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_f64m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_f64m1x2(double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_f64m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_f64m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_f64m2x2(double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_f64m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg2ei16_v_f64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_f64m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_f64m4x2(double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_f64m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_i8mf8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_i8mf8x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) 
// CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_i8mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_i8mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_i8mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_i8mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_i8m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_i8m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_i8m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_i8m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_i8m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_i8m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return 
__riscv_vsuxseg2ei16_v_i16mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_i16mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_i16mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_i16mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_i16m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_i16m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16.i64( 
[[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_i16m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_i16m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_i16m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_i16m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_i32mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i32mf2x2(int32_t *base, 
vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_i32mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_i32m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_i32m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_i32m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_i32m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_i32m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_i32m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_i64m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_i64m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_i64m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_i64m2x2(base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_i64m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_i64m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_u8mf8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_u8mf8x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_u8mf4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_u8mf4x2(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_u8mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_u8mf2x2(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_u8m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_u8m1x2(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_u8m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_u8m2x2(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_u8m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_u8m4x2(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_u16mf4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_u16mf4x2(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_u16mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_u16mf2x2(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_u16m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_u16m1x2(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_u16m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_u16m2x2(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_u16m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_u16m4x2(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_u32mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_u32mf2x2(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_u32m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_u32m1x2(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_u32m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_u32m2x2(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_u32m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_u32m4x2(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_u64m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_u64m1x2(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_u64m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_u64m2x2(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_u64m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_u64m4x2(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_f16mf4x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_f16mf2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_f16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_f16m1x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_f16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_f16m2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_f16m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_f16m4x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_f32mf2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_f32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f32m1x2_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_f32m1x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_f32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f32m2x2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_f32m2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_f32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f32m4x2_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_f32m4x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_f64m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f64m1x2_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_f64m1x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_f64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f64m2x2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_f64m2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_f64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f64m4x2_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_f64m4x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_i8mf8x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_i8mf4x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_i8mf2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_i8m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_i8m1x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_i8m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_i8m2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_i8m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_i8m4x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_i16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_i16mf4x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_i16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_i16mf2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_i16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_i16m1x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_i16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_i16m2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_i16m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_i16m4x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_i32mf2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_i32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_i32m1x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_i32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_i32m2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_i32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_i32m4x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_i64m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_i64m1x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_i64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_i64m2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_i64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_i64m4x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_u8mf8x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_u8mf4x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_u8mf2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16_v_u8m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_u8m1x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]],
[[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_u8m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_u8m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_u8m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_u8m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } 
poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_u16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_u16mf4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_u16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_u16mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t 
bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_u16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_u16m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_u16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_u16m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_u16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_u16m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_u32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_u32mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_u32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_u32m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_u32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_u32m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_u32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_u32m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - 
return __riscv_vsuxseg2ei16_v_u64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_u64m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_u64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_u64m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16_v_u64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16_v_u64m4x2_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei32.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei32.c @@ -7,923 +7,1291 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f16mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_f16mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f16mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_f16mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void
@llvm.riscv.vsuxseg2.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f16m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_f16m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f16m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_f16m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f16m4(base, bindex, v0, v1, vl); +void 
test_vsuxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_f16m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f32mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_f32mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f32m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_f32m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f32m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_f32m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f32m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_f32m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f64m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { + return 
__riscv_vsuxseg2ei32_v_f64m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f64m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_f64m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f64m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_f64m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i8mf8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i8mf8x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i8mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i8mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i8mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i8mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m1 -// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i8m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i8m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i8m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i8m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { 
, } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i16mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i16mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i16mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i16mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i16m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i16m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg2ei32_v_i16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i16m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i16m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i16m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i16m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i32mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i32mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i32m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i32m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i32m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i32m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i32m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i32m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i64m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i64m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i64m2(int64_t *base, vuint32m1_t 
bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i64m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i64m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i64m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i64m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_u8mf8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_u8mf8x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_u8mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_u8mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_u8mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_u8mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_u8m1(base, bindex, v0, v1, vl); +void 
test_vsuxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_u8m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_u8m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_u8m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_u16mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_u16mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_u16mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_u16mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_u16m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_u16m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_u16m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { + 
return __riscv_vsuxseg2ei32_v_u16m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_u16m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_u16m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_u32mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_u32mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_u32m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_u32m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_u32m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_u32m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_u32m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_u32m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg2ei32_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_u64m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_u64m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_u64m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_u64m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] 
= extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_u64m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_u64m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_f16mf4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, 
vfloat16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_f16mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_f16m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_f16m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_f16m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_f32mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_f32m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_f32m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { + 
return __riscv_vsuxseg2ei32_v_f32m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_f64m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_f64m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_f64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_f64m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf8x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i8mf8_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i8mf8x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i8mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i8mf4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i8mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i8mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i8m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i8m1x2_m(mask, base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i8m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i8m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32_v_i16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32_v_i16mf4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_i16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_i16mf2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_i16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_i16m1x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_i16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_i16m2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_i16m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_i16m4x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_i32mf2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_i32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_i32m1x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_i32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_i32m2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_i32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_i32m4x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_i64m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_i64m1x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_i64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_i64m2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_i64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_i64m4x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_u8mf8x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_u8mf4x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_u8mf2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_u8m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_u8m1x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_u8m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_u8m2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_u16mf4x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_u16mf2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_u16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_u16m1x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_u16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_u16m2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_u16m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_u16m4x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_u32mf2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_u32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_u32m1x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_u32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_u32m2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_u32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_u32m4x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_u64m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_u64m1x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_u64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_u64m2x2_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32_v_u64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_u64m4x2_m(mask, base, bindex, v_tuple, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei32_tuple.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei32_tuple.c
deleted file mode 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei32_tuple.c
+++ /dev/null
@@ -1,1297 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
-// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
-// RUN: FileCheck --check-prefix=CHECK-RV64 %s
-
-#include 
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
- return __riscv_vsuxseg2ei32_v_tuple_f16mf4x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsuxseg2ei32_v_tuple_f16mf2x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
- return __riscv_vsuxseg2ei32_v_tuple_f16m1x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
- return __riscv_vsuxseg2ei32_v_tuple_f16m2x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
- return __riscv_vsuxseg2ei32_v_tuple_f16m4x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsuxseg2ei32_v_tuple_f32mf2x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
- return __riscv_vsuxseg2ei32_v_tuple_f32m1x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
- return __riscv_vsuxseg2ei32_v_tuple_f32m2x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
- return __riscv_vsuxseg2ei32_v_tuple_f32m4x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
- return __riscv_vsuxseg2ei32_v_tuple_f64m1x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
- return __riscv_vsuxseg2ei32_v_tuple_f64m2x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
- return __riscv_vsuxseg2ei32_v_tuple_f64m4x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf8x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
- return __riscv_vsuxseg2ei32_v_tuple_i8mf8x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
- return __riscv_vsuxseg2ei32_v_tuple_i8mf4x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsuxseg2ei32_v_tuple_i8mf2x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) {
- return __riscv_vsuxseg2ei32_v_tuple_i8m1x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) {
- return __riscv_vsuxseg2ei32_v_tuple_i8m2x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
- return __riscv_vsuxseg2ei32_v_tuple_i16mf4x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsuxseg2ei32_v_tuple_i16mf2x2(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
[[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i16m1x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i16m2x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i16m4x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32mf2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i32mf2x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i32m1x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i32m2x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i32m4x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i64m1x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i64m2x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i64m4x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf8x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u8mf8x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u8mf4x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u8mf2x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u8m1x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u8m2x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u16mf4x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u16mf2x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u16m1x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u16m2x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u16m4x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32mf2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u32mf2x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u32m1x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u32m2x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u32m4x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local 
void @test_vsuxseg2ei32_v_u64m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u64m1x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u64m2x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u64m4x2(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t 
v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_f16mf4x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_f16mf2x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_f16m1x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_f16m2x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_f16m4x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32mf2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_f32mf2x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_f32m1x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_f32m2x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_f32m4x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_f64m1x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_f64m2x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) 
{ - return __riscv_vsuxseg2ei32_v_tuple_f64m4x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf8x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i8mf8x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i8mf4x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i8mf2x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i8m1x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i8m2x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i16mf4x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i16mf2x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i16m1x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i16m2x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i16m4x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32mf2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i32mf2x2_m(mask, 
base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i32m1x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i32m2x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i32m4x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i64m1x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i64m2x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_i64m4x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf8x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u8mf8x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u8mf4x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u8mf2x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u8m1x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u8m2x2_m(mask, base, bindex, v_tuple, vl); -} - -// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u16mf4x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u16mf2x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u16m1x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u16m2x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u16m4x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32mf2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u32mf2x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u32m1x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u32m2x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u32m4x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u64m1x2_m(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_v_tuple_u64m2x2_m(mask, base, bindex, 
v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxseg2ei32_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_v_tuple_u64m4x2_m(mask, base, bindex, v_tuple, vl);
-}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei64.c
@@ -7,823 +7,1151 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
-  return __riscv_vsuxseg2ei64_v_f16mf4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f16mf4x2(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg2ei64_v_f16mf4x2(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue
{ , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_f16mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f16mf2x2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_f16mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_f16m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f16m1x2(_Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_f16m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_f16m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f16m2x2(_Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_f16m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg2ei64_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_f32mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f32mf2x2(float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_f32mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_f32m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f32m1x2(float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_f32m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } 
[[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_f32m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f32m2x2(float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_f32m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_f32m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f32m4x2(float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_f32m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_f64m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f64m1x2(double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_f64m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_f64m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f64m2x2(double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_f64m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_f64m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f64m4x2(double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_f64m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.nxv1i8.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_i8mf8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_i8mf8x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_i8mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_i8mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_i8mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_i8mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8m1x2 +// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_i8m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_i8m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_i16mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_i16mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg2ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_i16mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_i16mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_i16m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_i16m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_i16m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_i16m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_i32mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_i32mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_i32m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_i32m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return 
__riscv_vsuxseg2ei64_v_i32m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_i32m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_i32m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_i32m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_i64m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_i64m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_i64m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_i64m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_i64m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_i64m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_u8mf8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, 
size_t vl) { + return __riscv_vsuxseg2ei64_v_u8mf8x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_u8mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_u8mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_u8mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_u8mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_u8m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u8m1x2(uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_u8m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_u16mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_u16mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64_v_u16mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64_v_u16mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg2ei64_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u16m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u16m1x2(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u16m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u16m2x2(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u32mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u32mf2x2(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u32m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u32m1x2(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u32m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u32m2x2(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u32m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u32m4x2(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u64m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u64m1x2(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u64m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u64m2x2(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u64m4(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u64m4x2(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_f16mf4x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_f16mf2x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_f16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_f16m1x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_f16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_f16m2x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_f32mf2x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_f32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f32m1x2_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_f32m1x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_f32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f32m2x2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_f32m2x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_f32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f32m4x2_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_f32m4x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_f64m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f64m1x2_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_f64m1x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_f64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f64m2x2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_f64m2x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_f64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f64m4x2_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_f64m4x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_i8mf8x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_i8mf4x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_i8mf2x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_i8m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_i8m1x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_i16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_i16mf4x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_i16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_i16mf2x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_i16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_i16m1x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_i16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_i16m2x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_i32mf2x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_i32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_i32m1x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_i32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_i32m2x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_i32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_i32m4x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_i64m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_i64m1x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_i64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_i64m2x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_i64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_i64m4x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u8mf8x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u8mf4x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u8mf2x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u8m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u8m1x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u16mf4x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u16mf2x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u16m1x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u16m2x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u32mf2x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u32m1x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u32m2x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u32m4x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u64m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u64m1x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u64m2x2_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64_v_u64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_u64m4x2_m(mask, base,
bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg2ei8.c @@ -7,963 +7,1347 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f16mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f16mf4x2(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f16mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f16mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f16mf2x2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f16mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m1x2 +// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f16m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f16m1x2(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f16m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f16m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f16m2x2(_Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f16m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsuxseg2ei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f16m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f16m4x2(_Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f16m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f32mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f32mf2x2(float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f32mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f32m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f32m1x2(float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f32m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f32m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f32m2x2(float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f32m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f32m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f32m4x2(float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f32m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f64m1(base, bindex, v0, 
v1, vl); +void test_vsuxseg2ei8_v_f64m1x2(double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f64m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f64m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f64m2x2(double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f64m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f64m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f64m4x2(double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f64m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i8mf8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i8mf8x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i8mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i8mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i8mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i8mf2x2(base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i8m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i8m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i8m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i8m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i8m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i8m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i16mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i16mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i16mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i16mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i16m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i16m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i16m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i16m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i16m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i16m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i32mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i32mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i32m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i32m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i32m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i32m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i32m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i32m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t 
vl) { - return __riscv_vsuxseg2ei8_v_i64m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i64m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i64m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i64m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i64m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i64m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u8mf8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u8mf8x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u8mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u8mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u8mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { + 
return __riscv_vsuxseg2ei8_v_u8mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u8m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u8m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u8m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u8m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u8m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u8m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u16mf4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u16mf4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u16mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u16mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m1 -// CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u16m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u16m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u16m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u16m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } 
[[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u16m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u16m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u32mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u32mf2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u32m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u32m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg2ei8_v_u32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u32m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u32m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u32m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u32m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u64m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u64m1x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u64m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u64m2x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u64m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u64m4x2(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f16mf4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f16mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , 
} [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f16m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f16m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { + return 
__riscv_vsuxseg2ei8_v_f16m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f32mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f32m1x2_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f32m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f32m2x2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f32m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f32m4x2_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f32m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f64m1x2_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f64m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f64m2x2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f64m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_f64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f64m4x2_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_f64m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg2ei8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf8x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i8mf8_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i8mf8x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i8mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i8mf4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i8mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i8mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i8m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i8m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, 
vint8m2_t v0, vint8m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i8m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i8m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i8m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i8m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i16mf4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local 
void @test_vsuxseg2ei8_v_i16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i16mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i16m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i16m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i16m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i32mf2x2_m(vbool64_t 
mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i32mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i32m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i32m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i32m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i64m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i64m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_i64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_i64m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf8x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u8mf8_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u8mf8x2_m(mask, base, bindex, v_tuple, vl); } 
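
The same rewrite repeats for every element type and LMUL in this file: the per-field `v0`/`v1` parameters collapse into one `x2` tuple argument, which Clang passes as two coerced scalable vectors and reassembles with insertvalue/extractvalue before calling the unchanged llvm.riscv.vsuxseg2 intrinsic. As a rough usage sketch of the migrated C API (hypothetical wrapper names; assumes a Clang recent enough to ship the tuple-type RVV intrinsics, compiled with -march=rv64gcv), mirroring the signatures exercised by these tests:

  #include <riscv_vector.h>
  #include <stddef.h>
  #include <stdint.h>

  /* Unordered indexed segment store, unmasked: the two fields of
     v_tuple are written as adjacent segment fields at base plus the
     byte offset taken from each element of bindex (EEW=8 indices). */
  void store_pair(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple,
                  size_t vl) {
    __riscv_vsuxseg2ei8_v_u8m1x2(base, bindex, v_tuple, vl);
  }

  /* Masked variant: lanes whose mask bit is clear are not stored. */
  void store_pair_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex,
                    vuint8mf8x2_t v_tuple, size_t vl) {
    __riscv_vsuxseg2ei8_v_u8mf8x2_m(mask, base, bindex, v_tuple, vl);
  }

The IR-level interface does not change here; only the C-level packaging of the segment fields moves from loose operands to a single tuple value.
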
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u8mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u8mf4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u8mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u8mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( 
[[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u8m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u8m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u8m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u8m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg2ei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u8m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u8m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u16mf4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u16mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u16m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u16m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u16m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u32mf2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, 
size_t vl) { - return __riscv_vsuxseg2ei8_v_u32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u32m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u32m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u32m4x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg2ei8_v_u64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u64m1x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8_v_u64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8_v_u64m2x2_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64> [[TMP3]], ptr [[BASE]], <vscale x 4 x i8> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
-  return __riscv_vsuxseg2ei8_v_u64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg2ei8_v_u64m4x2_m(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei16.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei16.c
@@ -7,743 +7,1187 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[TMP3]], <vscale x 1 x half> [[TMP4]], <vscale x 1 x half> [[TMP5]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_f16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_f16mf4x3(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_f16mf4x3(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0]], <vscale x 2 x half> [[V1]], <vscale x 2 x half> [[V2]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:
[[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_f16mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f16mf2x3(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_f16mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_f16m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f16m1x3(_Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_f16m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] 
= extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_f16m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f16m2x3(_Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_f16m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_f32mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f32mf2x3(float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_f32mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: 
ret void // -void test_vsuxseg3ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_f32m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f32m1x3(float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_f32m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_f32m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f32m2x3(float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_f32m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_f64m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f64m1x3(double *base, vuint16mf4_t bindex, 
vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_f64m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_f64m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f64m2x3(double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_f64m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_i8mf8(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_i8mf8x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_i8mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_i8mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_i8mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_i8mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_i8m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_i8m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_i8m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_i8m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } 
[[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_i16mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_i16mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_i16mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_i16mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_i16m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_i16m1x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_i16m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_i16m2x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_i32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_i32mf2x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_i32m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_i32m1x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_i32m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_i32m2x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_i64m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_i64m1x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_i64m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_i64m2x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf8x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_u8mf8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_u8mf8x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_u8mf4(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_u8mf4x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_u8mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_u8mf2x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_u8m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_u8m1x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_u8m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_u8m2x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_u16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_u16mf4x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_u16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_u16mf2x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_u16m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_u16m1x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_u16m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_u16m2x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_u32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_u32mf2x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_u32m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_u32m1x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_u32m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_u32m2x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_u64m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_u64m1x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_u64m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_u64m2x3(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_f16mf4x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_f16mf2x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_f16m1x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_f16m2x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_f32mf2x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_f32m1x3_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_f32m1x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_f32m2x3_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_f32m2x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_f64m1x3_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_f64m1x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_f64m2x3_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_f64m2x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf8x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_i8mf8x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_i8mf4x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_i8mf2x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_i8m1x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_i8m2x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_i16mf4x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_i16mf2x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_i16m1x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_i16m2x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_i32mf2x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_i32m1x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_i32m2x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_i64m1x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return __riscv_vsuxseg3ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_i64m2x3_m(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf8x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:
[[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_u8mf8x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_u8mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_u8mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_u8m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_u8m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_u16mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_u16mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_u16m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_u16m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_u32mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_u32m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16_v_u32m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// 
CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64> [[TMP5]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_u64m1x3_m(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m2_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], <vscale x 2 x i64> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m2x3_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[V0]], <vscale x 2 x i64> [[V1]], <vscale x 2 x i64> [[V2]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], <vscale x 2 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64> [[TMP5]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_u64m2x3_m(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei32.c
@@ -7,743 +7,1187 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_f16mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f16mf4x3(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_f16mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_f16mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f16mf2x3(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_f16mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { 
, , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_f16m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f16m1x3(_Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_f16m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_f16m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f16m2x3(_Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_f16m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_f32mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f32mf2x3(float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_f32mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_f32m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f32m1x3(float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_f32m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, 
size_t vl) { - return __riscv_vsuxseg3ei32_v_f32m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f32m2x3(float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_f32m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_f64m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f64m1x3(double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_f64m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_f64m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f64m2x3(double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_f64m2x3(base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i8mf8(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i8mf8x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i8mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i8mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf2x3 +// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i8mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i8mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i8m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i8m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i8m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i8m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i16mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i16mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i16mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i16mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i16m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i16m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.nxv8i16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i16m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i16m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i32mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i32mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return 
__riscv_vsuxseg3ei32_v_i32m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i32m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i32m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i32m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i64m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i64m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg3ei32_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i64m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i64m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u8mf8(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u8mf8x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u8mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u8mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u8mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u8mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u8m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u8m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u8m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u8m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u16mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u16mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u16mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u16mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32.i64( 
[[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u16m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u16m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u16m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u16m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return 
__riscv_vsuxseg3ei32_v_u32mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u32mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u32m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u32m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u32m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u32m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg3ei32_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u64m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u64m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u64m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u64m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf4x3_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_f16mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_f16mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m1x3_m +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_f16m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_f16m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_f32mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f32m1x3_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_f32m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f32m2x3_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_f32m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f64m1x3_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_f64m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f64m2x3_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_f64m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i8mf8x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i8mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i8mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i8m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i8m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[V0]], 
[[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i16mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i16mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i16m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i16m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i32mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i32m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i32m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i64m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] 
= insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_i64m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u8mf8x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u8mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u8mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } 
[[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u8m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u8m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u16mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u16mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } 
[[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u16m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u16m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] 
= extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u32mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u32m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u32m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32_v_u64m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64> [[TMP5]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei32_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei32_v_u64m2x3_m(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei64.c
@@ -7,703 +7,1123 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[TMP3]], <vscale x 1 x half> [[TMP4]], <vscale x 1 x half> [[TMP5]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei64_v_f16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei64_v_f16mf4x3(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei64_v_f16mf4x3(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0]], <vscale x 2 x half> [[V1]], <vscale x 2 x half> [[V2]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[V_TUPLE_COERCE1]], 1
+//
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_f16mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f16mf2x3(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_f16mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_f16m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f16m1x3(_Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_f16m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_f16m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f16m2x3(_Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_f16m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_f32mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f32mf2x3(float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_f32mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f32m1(float *base, vuint64m2_t 
bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_f32m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f32m1x3(float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_f32m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_f32m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f32m2x3(float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_f32m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_f64m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f64m1x3(double *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return 
__riscv_vsuxseg3ei64_v_f64m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_f64m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f64m2x3(double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_f64m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i8mf8(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i8mf8x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i8mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i8mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i8mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i8mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i8m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i8m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i16mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i16mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i16mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i16mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i16m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i16m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } 
[[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i16m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i16m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i32mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i32mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t 
v1, vint32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i32m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i32m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i32m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i32m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i64m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i64m1x3(base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i64m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i64m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u8mf8(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u8mf8x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf4x3 +// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u8mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u8mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u8mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u8mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], 
[[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u8m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u8m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u16mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u16mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } 
[[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u16mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u16mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u16m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u16m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.nxv8i16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u16m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u16m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u32mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u32mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) 
{ - return __riscv_vsuxseg3ei64_v_u32m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u32m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u32m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u32m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u64m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u64m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg3ei64_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u64m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u64m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_f16mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_f16mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_f16m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_f16m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_f32mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f32m1x3_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_f32m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f32m2x3_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_f32m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg3ei64_v_f64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f64m1x3_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_f64m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f64m2x3_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_f64m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf8x3_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i8mf8x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i8mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i8mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i8m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i16mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i16mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i16m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i16m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i32mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i32m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i32m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i64m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[V0]], 
[[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_i64m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u8mf8x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u8mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u8mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue 
{ , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u8m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u16mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u16mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u16m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u16m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u32mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u32m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u32m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u64m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64_v_u64m2x3_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg3ei8.c @@ -7,743 +7,1187 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { //
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_f16mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_f16mf4x3(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_f16mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_f16mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_f16mf2x3(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_f16mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_f16m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_f16m1x3(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_f16m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_f16m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_f16m2x3(_Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_f16m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } 
[[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_f32mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_f32mf2x3(float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_f32mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_f32m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_f32m1x3(float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_f32m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f32m2(float 
*base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_f32m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_f32m2x3(float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_f32m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_f64m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_f64m1x3(double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_f64m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_f64m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_f64m2x3(double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return 
__riscv_vsuxseg3ei8_v_f64m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i8mf8(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i8mf8x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i8mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i8mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg3ei8_v_i8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i8mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i8mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i8m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i8m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i8m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i8m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i16mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i16mf4x3(int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i16mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i16mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i16mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i16m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i16m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8.i64( [[TMP3]], [[TMP4]], 
[[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i16m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i16m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i32mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i32mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i32m1(base, bindex, v0, v1, v2, vl); +void 
test_vsuxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i32m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i32m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i32m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i64m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i64m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i64m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i64m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u8mf8(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_u8mf8x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u8mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_u8mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u8mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_u8mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u8m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_u8m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u8m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_u8m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u16mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_u16mf4x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u16mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_u16mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u16m1(uint16_t *base, 
vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u16m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_u16m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u16m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_u16m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u32mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { + return 
__riscv_vsuxseg3ei8_v_u32mf2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u32m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_u32m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u32m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_u32m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg3ei8_v_u64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u64m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_u64m1x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u64m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_u64m2x3(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_f16mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_f16mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_f16m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_f16m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i8.i64( [[V0]], 
[[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_f32mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_f32m1x3_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_f32m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_f32m2x3_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_f32m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_f64m1x3_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_f64m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } 
poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_f64m2x3_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_f64m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i8mf8x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i8mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i8mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8_v_i8m1x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8_v_i8m2x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8_v_i16mf4x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8_v_i16mf2x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:
[[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i16m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i16m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i32mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i32m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i32m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i64m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( 
[[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_i64m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_u8mf8x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_u8mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_u8mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg3ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_u8m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_u8m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, 
vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_u16mf4x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_u16mf2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t 
v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_u16m1x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei8_v_u16m2x3_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return 
__riscv_vsuxseg3ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8_v_u32mf2x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8_v_u32m1x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8_v_u32m2x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8_v_u64m1x3_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8_v_u64m2x3_m(mask, base, bindex, v_tuple, vl);
 }
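The hunks above and below are mechanical: each non-policy segment-store test now passes a single vintNMxK_t / vfloatNMxK_t tuple instead of K separate vector arguments, and CodeGen reassembles the coerced tuple fields with insertvalue/extractvalue before calling the unchanged @llvm.riscv.vsuxsegN / vsoxsegN intrinsic. A minimal caller-side sketch of the migrated C API, assuming the tuple-returning segment loader __riscv_vlseg3e8_v_i8m1x3 from the same intrinsics update is available (the store is the intrinsic exercised by these tests):

    #include <riscv_vector.h>

    /* Sketch under the above assumption: load one three-field segment as a
       tuple, then scatter it through unordered byte-indexed offsets under a
       mask, using the tuple-based intrinsic from this patch. */
    void scatter_seg3_masked(vbool8_t mask, int8_t *dst, const int8_t *src,
                             vuint8m1_t bindex, size_t vl) {
      vint8m1x3_t v_tuple = __riscv_vlseg3e8_v_i8m1x3(src, vl);
      __riscv_vsuxseg3ei8_v_i8m1x3_m(mask, dst, bindex, v_tuple, vl);
    }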
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei16.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei16.c
@@ -7,743 +7,1335 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_f16mf4x4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_f16mf4x4(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_f16mf2x4(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_f16mf2x4(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei16_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_f16m1x4(_Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_f16m1x4(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , ,
, } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_f16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f16m2x4(_Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_f16m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f32mf2x4(float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_f32mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_f32m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f32m1x4(float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_f32m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_f32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f32m2x4(float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_f32m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m1x4 +// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_f64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f64m1x4(double *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_f64m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_f64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f64m2x4(double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_f64m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf8 -// CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i8mf8x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, 
vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i8mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i8mf2x4(int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i8mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, 
vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i8m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i8m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i8m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i8m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16.i64( 
[[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i16mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i16mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , 
} [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i16m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i16m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i32mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i32m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i32m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , 
} poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i32m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i64m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i64m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u8mf8x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg4ei16_v_u8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u8mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u8mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg4ei16_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u8m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u8m1x4(uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u8m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u8m2(base, bindex, v0, v1, v2, v3, vl); +void 
test_vsuxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u8m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u16mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret 
void // -void test_vsuxseg4ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u16mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u16m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u16m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u32mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], 
[[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u32m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u32m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u32m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u64m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u64m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_f16mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_f16mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg4ei16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_f16m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg4ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_f16m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_f32mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f32m1x4_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_f32m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f32m2x4_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_f32m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i16.i64( [[V0]], 
[[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f64m1x4_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_f64m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f64m2x4_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_f64m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i8mf8x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return 
__riscv_vsuxseg4ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i8mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i8mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i8m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i8m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i16mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i16mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i16m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return 
__riscv_vsuxseg4ei16_v_i16m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i32mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i32m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i32m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , 
, } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i64m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_i64m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u8mf8x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u8mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u8mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t 
v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u8m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u8m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { 
, , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u16mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u16mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u16m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u16m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32mf2x4_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u32mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u32m1x4_m(vbool32_t mask, 
uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u32m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u32m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u64m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16_v_u64m2x4_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei32.c @@ -7,743 +7,1335 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]])
#[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f16mf4x4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_f16mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f16mf2x4(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_f16mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_f16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f16m1x4(_Float16 *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_f16m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_f16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f16m2x4(_Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_f16m2x4(base, bindex, 
v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f32mf2x4(float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_f32mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { 
- return __riscv_vsuxseg4ei32_v_f32m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f32m1x4(float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_f32m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_f32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f32m2x4(float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_f32m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], 
[[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_f64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f64m1x4(double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_f64m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_f64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f64m2x4(double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_f64m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_i8mf8x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i8mf4x4(int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_i8mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , 
, } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_i8mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_i8m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_i8m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { 
, , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_i8m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_i8m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i16mf4x4(int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_i16mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_i16mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_i16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_i16m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m2x4 +// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_i16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_i16m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_i32mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_i32m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_i32m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_i32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, 
vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_i32m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_i64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_i64m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, 
vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_i64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_i64m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_u8mf8x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.nxv2i8.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u8mf4x4(uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_u8mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_u8mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] 
= extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_u8m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_u8m1x4(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_u8m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_u8m2x4(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_u16mf4x4(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_u16mf2x4(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_u16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_u16m1x4(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_u16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_u16m2x4(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_u32mf2x4(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_u32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_u32m1x4(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_u32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_u32m2x4(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_u64m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_u64m1x4(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_u64m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_u64m2x4(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_f16mf4x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_f16mf2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_f16m1x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_f16m2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_f32mf2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_f32m1x4_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_f32m1x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_f32m2x4_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_f32m2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_f64m1x4_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_f64m1x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_f64m2x4_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_f64m2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf8x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_i8mf8x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_i8mf4x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_i8mf2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_i8m1x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_i8m2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_i16mf4x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_i16mf2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_i16m1x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_i16m2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_i32mf2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_i32m1x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_i32m2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_i64m1x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_i64m2x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf8x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_u8mf8x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_u8mf4x4_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue
{ , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_u8mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_u8m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_u8m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_u16mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_u16mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_u16m1x4_m(mask, base, 
bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_u16m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_u32mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_u32m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], 
[[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_u32m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_u64m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32_v_u64m2x4_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei64.c @@ -7,703 +7,1263 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return 
__riscv_vsuxseg4ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f16mf4x4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_f16mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f16mf2x4(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_f16mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i64.i64( [[TMP4]], [[TMP5]], 
[[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_f16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f16m1x4(_Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_f16m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_f16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f16m2x4(_Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_f16m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f32mf2x4(float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_f32mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_f32m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f32m1x4(float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_f32m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_f32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f32m2x4(float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_f32m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_f64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f64m1x4(double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_f64m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , 
} poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_f64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f64m2x4(double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_f64m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_i8mf8x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_i8mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_i8mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8m1x4 +// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_i8m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_i8m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_i16mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf2 -// CHECK-RV64-SAME: 
(ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_i16mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_i16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i16m1x4(int16_t *base, 
vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_i16m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_i16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_i16m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i32mf2(int32_t *base, 
vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_i32mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_i32m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_i32m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_i32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_i32m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_i64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_i64m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_i64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_i64m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_u8mf8x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { 
, , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_u8mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_u8mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_u8m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_u8m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_u16mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u16mf2x4(uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_u16mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_u16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_u16m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_u16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_u16m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_u32mf2x4(base, bindex, v_tuple, 
vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_u32m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_u32m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return 
__riscv_vsuxseg4ei64_v_u32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_u32m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_u64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_u64m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_u64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_u64m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_f16mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 
3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_f16mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_f16m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_f16m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_f32mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f32m1x4_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_f32m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, 
vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f32m2x4_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_f32m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f64m1x4_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_f64m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f64m2x4_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_f64m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_i8mf8x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_i8mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64_v_i8mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i8m1x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i16mf4x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i16mf2x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i16m1x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i16m2x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i32mf2x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i32m1x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i32m2x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i64m1x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_i64m2x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf8x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u8mf8x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u8mf4x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u8mf2x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u8m1x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u16mf4x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u16mf2x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u16m1x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u16m2x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u32mf2x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u32m1x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u32m2x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u64m1x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64_v_u64m2x4_m(mask, base, bindex, v_tuple, vl);
 }

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei8.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg4ei8.c
@@ -7,743 +7,1335 @@
 #include 

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f16mf4x4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f16mf4x4(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f16mf2x4(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f16mf2x4(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f16m1x4(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f16m1x4(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f16m2x4(_Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f16m2x4(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f32mf2x4(float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f32mf2x4(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f32m1x4(float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f32m1x4(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f32m2x4(float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f32m2x4(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f64m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f64m1x4(double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f64m1x4(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f64m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f64m2x4(double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f64m2x4(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf8x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_i8mf8x4(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_i8mf4x4(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_i8mf2x4(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_i8m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_i8m1x4(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]],
[[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i8m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i8m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i16mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i16mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i16m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg4ei8_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i16m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, 
vl); +void test_vsuxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i32mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i32m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i32m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg4ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i32m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i64m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 
3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i64m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u8mf8x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u8mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u8mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } 
[[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u8m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u8m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u8m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u8m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } 
poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u16mf4x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u16mf2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u16m1x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u16m2x4(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32mf2x4 
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_u32mf2x4(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_u32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_u32m1x4(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_u32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_u32m2x4(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_u64m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_u64m1x4(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_u64m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_u64m2x4(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f16mf4x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f16mf2x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f16m1x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f16m2x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f32mf2x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f32m1x4_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8_v_f32m1x4_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]],
[[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f32m2x4_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_f32m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return 
__riscv_vsuxseg4ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f64m1x4_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_f64m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f64m2x4_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_f64m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i8mf8x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i8mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i8mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i8m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i8m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i16mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg4ei8_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i16mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16m1_m(vbool16_t 
mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i16m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i16m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] 
= extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i32mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i32m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i32m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i64m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m2x4_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_i64m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, 
vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u8mf8x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u8mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[TMP4]], [[TMP5]], 
[[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u8mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u8m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } 
[[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u8m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u16mf4x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u16mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u16m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u16m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, 
vuint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u32mf2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u32m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u32m2x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u64m1x4_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8_v_u64m2x4_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei16.c @@ -7,523 +7,1043 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - 
return __riscv_vsuxseg5ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_f16mf4x5(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_f16mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_f16mf2x5(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_f16mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , 
, , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_f16m1x5(_Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_f16m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_f32mf2x5(float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_f32mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_f32m1x5(float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_f32m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f64m1(double *base, 
vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_f64m1x5(double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_f64m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_i8mf8x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } 
[[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_i8mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_i8mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8m1x5 +// CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_i8m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg5ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_i16mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_i16mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_i16m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_i32mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg5ei16_v_i32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_i32m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_i64m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u8mf8x5(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_u8mf8x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_u8mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_u8mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_u8m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16.i64( [[TMP5]], 
[[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_u16mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_u16mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } 
[[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_u16m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_u32mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_u32m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue 
{ , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_u64m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_f16mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_f16mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, 
vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_f16m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_f32mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } 
[[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_f32m1x5_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_f32m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_f64m1x5_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return 
__riscv_vsuxseg5ei16_v_f64m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_i8mf8x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_i8mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_i8mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg5ei16_v_i8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_i8m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_i16mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_i16mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_i16m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, 
vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_i32mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_i32m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_i64m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_u8mf8x5_m(mask, base, bindex, 
v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_u8mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } 
[[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_u8mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_u8m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf4x5_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_u16mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_u16mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_u16m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_u32mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, 
vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_u32m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16_v_u64m1x5_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei32.c @@ -7,523 +7,1043 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_f16mf4x5(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_f16mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, 
vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_f16mf2x5(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_f16mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_f16m1x5(_Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_f16m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_f32mf2x5(float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_f32mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_f32m1x5(float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_f32m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f64m1x5 +// CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_f64m1x5(double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_f64m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg5ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_i8mf8x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_i8mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i8mf2x5(int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_i8mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_i8m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf4x5 +// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_i16mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_i16mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i16m1x5(int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_i16m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_i32mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_i32m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_i64m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], 
[[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_u8mf8x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_u8mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_u8mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_u8m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_u16mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.nxv2i16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_u16mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_u16m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_u32mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32_v_u32m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u64m1 -// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u64m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_u64m1x5(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf4x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_f16mf4x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_f16mf2x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_f16m1x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_f32mf2x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_f32m1x5_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_f32m1x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f64m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_f64m1x5_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_f64m1x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf8x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_i8mf8x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf4x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_i8mf4x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_i8mf2x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_i8m1x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf4x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_i16mf4x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_i16mf2x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_i16m1x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_i32mf2x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_i32m1x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i64m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_i64m1x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf8x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_u8mf8x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf4x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_u8mf4x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_u8mf2x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_u8m1x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf4x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_u16mf4x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_u16mf2x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_u16m1x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_u32mf2x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_u32m1x5_m(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u64m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_u64m1x5_m(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei64.c
@@ -7,523 +7,1043 @@
 #include
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_f16mf4x5(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64_v_f16mf4x5(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_f16mf2x5(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64_v_f16mf2x5(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_f16m1x5(_Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64_v_f16m1x5(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_f32mf2x5(float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64_v_f32mf2x5(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_f32m1x5(float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64_v_f32m1x5(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f64m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_f64m1x5(double *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64_v_f64m1x5(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void
@test_vsuxseg5ei64_v_i8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_i8mf8x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_i8mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_i8mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_i8m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_i16mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local 
void @test_vsuxseg5ei64_v_i16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_i16mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_i16m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_i32mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , 
, , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_i32m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_i64m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_u8mf8x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i64.i64( [[TMP5]], [[TMP6]], 
[[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_u8mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_u8mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_u8m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_u16mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_u16mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_u16m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_u32mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_u32m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_u64m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg5ei64_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_f16mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_f16mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_f16m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32mf2x5_m 
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_f32mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_f32m1x5_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_f32m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_f64m1x5_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_f64m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_i8mf8x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, 
vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_i8mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_i8mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_i8m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_i16mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg5ei64_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_i16mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_i16m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_i32mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32m1x5_m +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_i32m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_i64m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_u8mf8x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_u8mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t 
v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_u8mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_u8m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] 
= insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_u16mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_u16mf2x5_m(mask, base, bindex, 
v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_u16m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , 
, , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_u32mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_u32m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg5ei64_v_u64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64_v_u64m1x5_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg5ei8.c @@ -7,523 +7,1043 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } 
[[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f16mf4x5(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_f16mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f16mf2x5(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_f16mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f16m1x5(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_f16m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f32mf2(float *base, 
vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f32mf2x5(float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_f32mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f32m1x5(float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_f32m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } 
[[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f64m1x5(double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_f64m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_i8mf8x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf4x5 +// CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_i8mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg5ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_i8mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_i8m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } 
[[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_i16mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_i16mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16m1x5 +// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_i16m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg5ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_i32mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i32m1x5(int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_i32m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_i64m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_u8mf8x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf4x5 +// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_u8mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg5ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u8mf2x5(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_u8mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_u8m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { 
, , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_u16mf4x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_u16mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg5ei8_v_u16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_u16m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_u32mf2x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_u32m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } 
[[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_u64m1x5(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_f16mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf2_m -// CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_f16mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_f16m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_f32mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f32m1x5_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_f32m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], 
[[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f64m1x5_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_f64m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_i8mf8x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_i8mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void 
test_vsuxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_i8mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_i8m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_i16mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_i16mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_i16m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] 
= extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_i32mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_i32m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_i64m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8mf8_m(vbool64_t mask, 
uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_u8mf8x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_u8mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { 
, , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_u8mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_u8m1x5_m(mask, 
base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_u16mf4x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_u16mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_u16m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg5ei8_v_u32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_u32mf2x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_u32m1x5_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8_v_u64m1x5_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei16.c @@ -7,523 +7,1147 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_f16mf4x6(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_f16mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], 
[[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_f16mf2x6(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_f16mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_f16m1x6(_Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_f16m1x6(base, bindex, v_tuple, vl); } 
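// ---------------------------------------------------------------------------
// Editorial aside (not part of the autogenerated diff): the change above is a
// pure API migration -- each vsuxseg/vsoxseg intrinsic now takes one tuple
// value (e.g. vfloat16m1x6_t) instead of NF separate vector operands, and the
// coerced tuple members are reassembled via insertvalue/extractvalue before
// the underlying llvm.riscv.vsuxseg* call. A minimal caller-side sketch of
// the new shape follows; __riscv_vsuxseg6ei16_v_f16m1x6 is taken from the
// tests above, while __riscv_vlseg6e16_v_f16m1x6 is the assumed
// tuple-returning segment-load counterpart, not shown in this diff.
//
//   #include <riscv_vector.h>
//
//   void gather_store(_Float16 *dst, const _Float16 *src,
//                     vuint16m1_t bindex, size_t vl) {
//     // Load six fields as a single tuple value...
//     vfloat16m1x6_t v_tuple = __riscv_vlseg6e16_v_f16m1x6(src, vl);
//     // ...and pass it through as one argument; the old API needed v0..v5.
//     __riscv_vsuxseg6ei16_v_f16m1x6(dst, bindex, v_tuple, vl);
//   }
// ---------------------------------------------------------------------------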
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_f32mf2x6(float *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_f32mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], 
[[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_f32m1x6(float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_f32m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void 
test_vsuxseg6ei16_v_f64m1x6(double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_f64m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_i8mf8x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_i8mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t 
v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_i8mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_i8m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue 
{ , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_i16mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i16mf2(int16_t *base, 
vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_i16mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_i16m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_i32mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], 
[[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_i32m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_i64m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_u8mf8x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u8mf4x6(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_u8mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_u8mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_u8m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// 
CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_u16mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_u16mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16m1x6 +// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_u16m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_u32mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_u32m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_u64m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_f16mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_f16mf2_m(mask, 
base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_f16mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_f16m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_f32mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: 
[[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_f32m1x6_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_f32m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_f64m1x6_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_f64m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_i8mf8x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_i8mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, 
vl); +void test_vsuxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_i8mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_i8m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_i16mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_i16mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_i16m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32mf2x6_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_i32mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], 
[[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_i32m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t 
*base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_i64m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_u8mf8x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_u8mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( 
[[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_u8mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_u8m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_u16mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , 
, , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_u16mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, 
size_t vl) { + return __riscv_vsuxseg6ei16_v_u16m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_u32mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_u32m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], 
[[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16_v_u64m1x6_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei32.c @@ -7,523 +7,1147 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_f16mf4x6(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_f16mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf2 -// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return __riscv_vsuxseg6ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_f16mf2x6(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg6ei32_v_f16mf2x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return __riscv_vsuxseg6ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_f16m1x6(_Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg6ei32_v_f16m1x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return __riscv_vsuxseg6ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_f32mf2x6(float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg6ei32_v_f32mf2x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return __riscv_vsuxseg6ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_f32m1x6(float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg6ei32_v_f32m1x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f64m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]],
[[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return __riscv_vsuxseg6ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_f64m1x6(double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg6ei32_v_f64m1x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf8x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return __riscv_vsuxseg6ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg6ei32_v_i8mf8x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf4x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return __riscv_vsuxseg6ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg6ei32_v_i8mf4x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return __riscv_vsuxseg6ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg6ei32_v_i8mf2x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0,
vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return __riscv_vsuxseg6ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg6ei32_v_i8m1x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf4x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return __riscv_vsuxseg6ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_i16mf4x6(int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg6ei32_v_i16mf4x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return __riscv_vsuxseg6ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg6ei32_v_i16mf2x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return __riscv_vsuxseg6ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg6ei32_v_i16m1x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return __riscv_vsuxseg6ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg6ei32_v_i32mf2x6(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void
@llvm.riscv.vsuxseg6.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_i32m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_i64m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_u8mf8x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], 
[[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_u8mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: 
[[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_u8mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_u8m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_u16mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = 
extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_u16mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_u16m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_u32mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_u32m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_u64m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg6ei32_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_f16mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , 
, , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_f16mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg6ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_f16m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_f32mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_f32m1x6_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_f32m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_f64m1x6_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_f64m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_i8mf8x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg6ei32_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_i8mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } 
[[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_i8mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8m1_m(vbool8_t mask, 
int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_i8m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_i16mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_i16mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: 
[[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_i16m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_i32mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_i32m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } 
[[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_i64m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, 
vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_u8mf8x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_u8mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_u8mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: 
[[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_u8m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_u16mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_u16mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_u16m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, 
vuint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_u32mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_u32m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32_v_u64m1x6_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei64.c @@ -7,523 +7,1147 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// 
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg6ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return __riscv_vsuxseg6ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_f16mf4x6(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg6ei64_v_f16mf4x6(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg6ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return __riscv_vsuxseg6ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_f16mf2x6(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg6ei64_v_f16mf2x6(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg6ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return __riscv_vsuxseg6ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_f16m1x6(_Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg6ei64_v_f16m1x6(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg6ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return __riscv_vsuxseg6ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_f32mf2x6(float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg6ei64_v_f32mf2x6(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg6ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return __riscv_vsuxseg6ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_f32m1x6(float *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg6ei64_v_f32m1x6(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f64m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg6ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return __riscv_vsuxseg6ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_f64m1x6(double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg6ei64_v_f64m1x6(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf8x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_i8mf8x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8mf4(int8_t *base, vuint64m2_t 
bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_i8mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_i8mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_i8m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_i16mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_i16mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i64.i64( 
[[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_i16m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.nxv1i32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_i32mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_i32m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_i64m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_u8mf8x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u8mf4x6(uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_u8mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_u8mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// 
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_u8m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_u16mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg6ei64_v_u16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_u16mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_u16m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_u32mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_u32m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_u64m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void 
test_vsuxseg6ei64_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_f16mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_f16mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_f16m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_f32mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_f32m1x6_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_f32m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg6ei64_v_f64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_f64m1x6_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_f64m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_i8mf8x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void 
test_vsuxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_i8mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_i8mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_i8m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_i16mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_i16mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16m1x6_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_i16m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_i32mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, 
vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_i32m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_i64m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_u8mf8x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], 
[[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_u8mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_u8mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_u8m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_u16mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { + 
return __riscv_vsuxseg6ei64_v_u16mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_u16m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { 
, , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_u32mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], 
[[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_u32m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64_v_u64m1x6_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg6ei8.c @@ -7,523 +7,1147 @@ #include <riscv_vector.h> -//
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_f16mf4x6(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_f16mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], 
[[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_f16mf2x6(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_f16mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); 
+void test_vsuxseg6ei8_v_f16m1x6(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_f16m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_f32mf2x6(float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_f32mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_f32m1x6(float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_f32m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, 
vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_f64m1x6(double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_f64m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_i8mf8x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_i8mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg6ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_i8mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_i8m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_i16mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], 
[[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_i16mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_i16m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_i32mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_i32m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_i64m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_u8mf8x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_u8mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_u8mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_u8m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue 
{ , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u16mf4x6(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_u16mf4x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_u16mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg6ei8_v_u16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_u16m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_u32mf2x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u32m1x6(uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_u32m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_u64m1x6(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_f16mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_f16mf2_m(mask, base, bindex, 
v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_f16mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_f16m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_f32mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_f32m1x6_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_f32m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_f64m1x6_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_f64m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg6ei8_v_i8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_i8mf8x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], 
[[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_i8mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t 
bindex, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_i8mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_i8m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue 
{ , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_i16mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_i16mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_i16m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_i32mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { 
, , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_i32m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_i64m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg6ei8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_u8mf8x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_u8mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u8mf2_m(vbool16_t mask, uint8_t 
*base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_u8mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_u8m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_u16mf4x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , 
, , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_u16mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_u16m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_u32mf2x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], 
[[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8_v_u32m1x6_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, 
vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return __riscv_vsuxseg6ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg6ei8_v_u64m1x6_m(mask, base, bindex, v_tuple, vl);
 }
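(Aside, before the next test file: a hedged sketch of the unmasked NF=7 usage it covers. The tuple-returning __riscv_vlseg7e16_v_f16mf4x7 is an assumption carried over from the same tuple migration, reorder_f16mf4x7 is a hypothetical name, and the block is illustrative rather than part of the patch.)

```c
#include <riscv_vector.h>

// Round-trip a seven-field _Float16 tuple: unit-stride segment load into
// vfloat16mf4x7_t, then unordered indexed segment store through 16-bit
// offsets, matching the f16mf4x7 test that opens the next file.
void reorder_f16mf4x7(_Float16 *dst, const _Float16 *src,
                      vuint16mf4_t bindex, size_t vl) {
  vfloat16mf4x7_t v_tuple = __riscv_vlseg7e16_v_f16mf4x7(src, vl);
  __riscv_vsuxseg7ei16_v_f16mf4x7(dst, bindex, v_tuple, vl);
}
```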
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei16.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei16.c
@@ -7,523 +7,1251 @@
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return __riscv_vsuxseg7ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_f16mf4x7(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg7ei16_v_f16mf4x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return __riscv_vsuxseg7ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_f16mf2x7(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg7ei16_v_f16mf2x7(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison,
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f16m1x7(_Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_f16m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// 
CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f32mf2x7(float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_f32mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f32m1x7(float 
*base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_f32m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f64m1x7(double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_f64m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], 
[[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_i8mf8x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = 
extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_i8mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_i8mf2(base, 
bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i8mf2x7(int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_i8mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_i8m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_i16mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_i16mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, 
vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i16m1x7(int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_i16m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_i32mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], 
[[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_i32m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return __riscv_vsuxseg7ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg7ei16_v_i64m1x7(base, bindex, v_tuple, vl);
 }
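(Aside: a sketch of updating one field of a tuple before the u8mf8x7 store exercised just below. The tuple form of vset, __riscv_vset_v_u8mf8_u8mf8x7, is assumed from the same series; patch_and_scatter_u8mf8x7 is a hypothetical name, and the block is not part of the patch.)

```c
#include <riscv_vector.h>

// Replace field 0 of a seven-field tuple (the vset field index must be a
// compile-time constant), then scatter the tuple through unordered 16-bit
// offsets with the tuple-based intrinsic tested below.
void patch_and_scatter_u8mf8x7(uint8_t *dst, vuint16mf4_t bindex,
                               vuint8mf8x7_t v_tuple, vuint8mf8_t repl,
                               size_t vl) {
  v_tuple = __riscv_vset_v_u8mf8_u8mf8x7(v_tuple, 0, repl);
  __riscv_vsuxseg7ei16_v_u8mf8x7(dst, bindex, v_tuple, vl);
}
```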
test_vsuxseg7ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_u8mf8x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_u8mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_u8mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_u8m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], 
[[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_u16mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_u16mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg7ei16_v_u16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_u16m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], 
[[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_u32mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_u32m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_u64m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_f16mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , 
, , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_f16mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_f16m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f32mf2_m(vbool64_t mask, float 
*base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_f32mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f32m1x7_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_f32m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f64m1x7_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_f64m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_i8mf8x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: 
[[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_i8mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return 
__riscv_vsuxseg7ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_i8mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_i8m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_i16mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], 
[[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_i16mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = 
extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_i16m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, 
vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_i32mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_i32m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_i64m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } 
[[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_u8mf8x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( 
[[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_u8mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_u8mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8m1_m -// CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_u8m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_u16mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , 
, , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_u16mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg7ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_u16m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_u32mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], 
[[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16_v_u32m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], <vscale x 1 x i64> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_u64m1x7_m(mask, base, bindex, v_tuple, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei32.c
@@ -7,523 +7,1251 @@
#include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], <vscale x 1 x half> [[V3]], <vscale x 1 x half> [[V4]], <vscale x 1 x half> [[V5]], <vscale x 1 x half> [[V6]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> }
[[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_f16mf4x7(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_f16mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.nxv2f16.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_f16mf2x7(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_f16mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_f16m1x7(_Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_f16m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_f32mf2x7(float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_f32mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , 
, , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_f32m1x7(float *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_f32m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = 
extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_f64m1x7(double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_f64m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_i8mf8x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf4 -// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], <vscale x 2 x i8> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE3:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE4:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE5:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[V0]], <vscale x 2 x i8> [[V1]], <vscale x 2 x i8> [[V2]], <vscale x 2 x i8> [[V3]], <vscale x 2 x i8> [[V4]], <vscale x 2 x i8> [[V5]], <vscale x 2 x i8> [[V6]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], <vscale x 2 x i8> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP4]], <vscale x 2 x i8> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], <vscale x 2 x i8> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], <vscale x 2 x i8> [[TMP12]], <vscale x 2 x i8> [[TMP13]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32_v_i8mf4x7(base, bindex, v_tuple, vl);
 }

[The identical rewrite, changing only the element type, LMUL suffix, and intrinsic mangling (e.g. nxv4i8.nxv4i32 for i8mf2x7, nxv8i8.nxv8i32 for i8m1x7), is applied to the remaining unmasked vsuxseg7ei32 tests in this range: i8mf2x7, i8m1x7, i16mf4x7, i16mf2x7, i16m1x7, i32mf2x7, i32m1x7, i64m1x7, u8mf8x7, u8mf4x7, u8mf2x7, u8m1x7, u16mf4x7, u16mf2x7, u16m1x7, u32mf2x7, u32m1x7, and u64m1x7; each new test takes (element type *base, vuint32 index vector bindex, the matching x7 tuple v_tuple, size_t vl).]

[The masked tests are rewritten the same way, with a leading <vscale x N x i1> [[MASK:%.*]] parameter and a call to the corresponding @llvm.riscv.vsuxseg7.mask.* intrinsic that passes [[MASK]] between [[BINDEX]] and [[VL]]: f16mf4x7_m (nxv1f16.nxv1i32), f16mf2x7_m, f16m1x7_m, f32mf2x7_m, f32m1x7_m, f64m1x7_m, and i8mf8x7_m.]

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf4_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], <vscale x 2 x i8> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL:
define dso_local void @test_vsuxseg7ei32_v_i8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_i8mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_i8mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { 
, , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_i8m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_i16mf4_m(mask, base, bindex, v0, v1, 
v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_i16mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_i16mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_i16m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_i32mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// 
CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32> [[TMP9]], <vscale x 2 x i32> [[TMP10]], <vscale x 2 x i32> [[TMP11]], <vscale x 2 x i32> [[TMP12]], <vscale x 2 x i32> [[TMP13]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32_v_i32m1x7_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i64m1_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i64m1x7_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], <vscale x 1 x i64> [[V5]], <vscale x 1 x i64> [[V6]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], <vscale x 1 x i64> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) {
+  return
__riscv_vsuxseg7ei32_v_i64m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_u8mf8x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_u8mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], 
[[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_u8mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[TMP7]], 
[[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_u8m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_u16mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf2_m -// CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_u16mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_u16m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_u32mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret 
void // -void test_vsuxseg7ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_u32m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32_v_u64m1x7_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei64.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei64.c @@ -7,523 +7,1251 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], <vscale x 1 x half> [[V3]], <vscale x 1 x half> [[V4]], <vscale x 1 x half> [[V5]], <vscale x 1 x half> [[V6]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], <vscale x 1 x half> [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], <vscale x 1 x half> [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], <vscale x 1 x half> [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[TMP7]], <vscale x 1 x half> [[TMP8]], <vscale x 1 x half> [[TMP9]], <vscale x 1 x half> [[TMP10]], <vscale x 1 x half> [[TMP11]], <vscale x 1 x half> [[TMP12]], <vscale x 1 x half> [[TMP13]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_f16mf4x7(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_f16mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]])
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_f16mf2x7(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_f16mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , 
, , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_f16m1x7(_Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_f16m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f32mf2(float *base, 
vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_f32mf2x7(float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_f32mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_f32m1x7(float *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_f32m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_f64m1x7(double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_f64m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 
4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_i8mf8x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], 
[[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_i8mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_i8mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8m1x7 +// CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_i8m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , 
, , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_i16mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i64.i64( [[TMP7]], 
[[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_i16mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_i16m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg7ei64_v_i32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_i32mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , 
, , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_i32m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_i64m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_u8mf8x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_u8mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_u8mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_u8m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_u16mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf2 -// CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_u16mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , 
, , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u16m1x7(uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_u16m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: 
[[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_u32mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { + return 
__riscv_vsuxseg7ei64_v_u32m1x7(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u64m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return __riscv_vsuxseg7ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg7ei64_v_u64m1x7(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf4x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return __riscv_vsuxseg7ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei64_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg7ei64_v_f16mf4x7_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return __riscv_vsuxseg7ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei64_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg7ei64_v_f16mf2x7_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]],
[[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_f16m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_f32mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_f32m1x7_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_f32m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , 
} poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_f64m1x7_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_f64m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , 
, , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_i8mf8x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, 
vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_i8mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_i8mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_i8m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return __riscv_vsuxseg7ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg7ei64_v_i16mf4x7_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , }
[[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_i16mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i16m1x7_m(vbool16_t 
mask, int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_i16m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_i32mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], 
[[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_i32m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_i64m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_u8mf8x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_u8mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg7ei64_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_u8mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_u8m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_u16mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret 
void // -void test_vsuxseg7ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_u16mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_u16m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_u32mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_u32m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// 
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64_v_u64m1x7_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg7ei8.c @@ -7,523 +7,1251 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = 
extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_f16mf4x7(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_f16mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_f16mf2x7(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_f16mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_f16m1x7(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_f16m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_f32mf2x7(float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_f32mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = 
extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_f32m1x7(float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_f32m1x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_f64m1x7(double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_f64m1x7(base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_i8mf8x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_i8mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_i8mf2x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_i8m1x7(base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_i16mf4x7(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, 
[[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i16mf2x7(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i16m1x7(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i32mf2x7(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i32m1x7(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i64m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i64m1x7(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf8x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u8mf8x7(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u8mf4x7(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u8mf2x7(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u8m1x7(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u16mf4x7(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u16mf2x7(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u16m1x7(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u32mf2x7(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u32m1x7(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u64m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_u64m1x7(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf4x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_f16mf4x7_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_f16mf2x7_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_f16m1x7_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_f32mf2x7_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_f32m1x7_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_f32m1x7_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f64m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_f64m1x7_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_f64m1x7_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf8x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i8mf8x7_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf4x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i8mf4x7_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i8mf2x7_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i8m1x7_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf4x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei8_v_i16mf4x7_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]],
[[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_i16mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , 
, , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_i16m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i32mf2_m(vbool64_t 
mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_i32mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_i32m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg7ei8_v_i64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_i64m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_u8mf8x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_u8mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, 
v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_u8mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_u8m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], 
[[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_u16mf4x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , 
} [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_u16mf2x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue 
{ , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_u16m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_u32mf2x7_m(mask, base, 
bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8_v_u32m1x7_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return __riscv_vsuxseg7ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg7ei8_v_u64m1x7_m(mask, base, bindex, v_tuple, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei16.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei16.c
@@ -7,523 +7,1355 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , ,
, , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_f16mf4x8(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_f16mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_f16mf2x8(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_f16mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , 
, , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_f16m1x8(_Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_f16m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, 
vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_f32mf2x8(float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_f32mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_f32m1x8(float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_f32m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_f64m1x8(double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_f64m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_i8mf8x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , 
, } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_i8mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = 
extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_i8mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, 
vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_i8m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_i16mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_i16mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], 
[[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_i16m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_i32mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = 
extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_i32m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void 
// -void test_vsuxseg8ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_i64m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_u8mf8x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_u8mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_u8mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_u8m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , 
, , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_u16mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], 
[[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_u16mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_u16m1x8(base, bindex, v_tuple, vl); 
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u32mf2x8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u32m1x8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u64m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u64m1x8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf4x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_f16mf4x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_f16mf2x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_f16m1x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_f32mf2x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_f32m1x8_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_f32m1x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f64m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_f64m1x8_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_f64m1x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf8x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i8mf8x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf4x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i8mf4x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i8mf2x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i8m1x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf4x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i16mf4x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i16mf2x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i16m1x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i32mf2x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i32m1x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i64m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_i64m1x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf8x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u8mf8x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf4x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u8mf4x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u8mf2x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u8m1x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf4x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_u16mf4x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]],
[[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_u16mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } 
[[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_u16m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_u32mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: 
[[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_u32m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( 
[[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16_v_u64m1x8_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei32.c @@ -7,523 +7,1355 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]],
[[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f16mf4x8(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_f16mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f16mf2x8(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return 
__riscv_vsuxseg8ei32_v_f16mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f16m1x8(_Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_f16m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f32mf2x8(float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_f32mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f32m1x8(float *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_f32m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f64m1x8(double *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_f64m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// 
CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i8mf8x8(int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_i8mf8x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); 
+void test_vsuxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_i8mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_i8mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_i8m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } 
[[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i16mf4x8(int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_i16mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP6]], <vscale x 2 x i16> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16> [[TMP9]], <vscale x 2 x i16> [[TMP10]], <vscale x 2 x i16> [[TMP11]], <vscale x 2 x i16> [[TMP12]], <vscale x 2 x i16> [[TMP13]], <vscale x 2 x i16> [[TMP14]], <vscale x 2 x i16> [[TMP15]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return __riscv_vsuxseg8ei32_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei32_v_i16mf2x8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], <vscale x 4 x i16> [[V5:%.*]], <vscale x 4 x i16> [[V6:%.*]], <vscale x 4 x i16> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE2:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE3:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE4:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE5:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE6:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[V0]], <vscale x 4 x i16> [[V1]], <vscale x 4 x i16> [[V2]], <vscale x 4 x i16> [[V3]], <vscale x 4 x i16> [[V4]], <vscale x 4 x i16> [[V5]], <vscale x 4 x i16> [[V6]], <vscale x 4 x i16> [[V7]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], <vscale x 4 x i16> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], <vscale x 4 x i16> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]], <vscale x 4 x i16> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], <vscale x 4 x i16> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP6]], <vscale x 4 x i16> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16> [[TMP9]], <vscale x 4 x i16> [[TMP10]], <vscale x 4 x i16> [[TMP11]], <vscale x 4 x i16> [[TMP12]], <vscale x 4 x i16> [[TMP13]], <vscale x 4 x i16> [[TMP14]], <vscale x 4 x i16> [[TMP15]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei32_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei32_v_i16m1x8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], <vscale x 1 x i32> [[V5:%.*]], <vscale x 1 x i32> [[V6:%.*]], <vscale x 1 x i32> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE6:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[V0]], <vscale x 1 x i32> [[V1]], <vscale x 1 x i32> [[V2]], <vscale x 1 x i32> [[V3]], <vscale x 1 x i32> [[V4]], <vscale x 1 x i32> [[V5]], <vscale x 1 x i32> [[V6]], <vscale x 1 x i32> [[V7]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], <vscale x 1 x i32> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], <vscale x 1 x i32> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], <vscale x 1 x i32> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP4]], <vscale x 1 x i32> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP5]], <vscale x 1 x i32> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], <vscale x 1 x i32> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32> [[TMP9]], <vscale x 1 x i32> [[TMP10]], <vscale x 1 x i32> [[TMP11]], <vscale x 1 x i32> [[TMP12]], <vscale x 1 x i32> [[TMP13]], <vscale x 1 x i32> [[TMP14]], <vscale x 1 x i32> [[TMP15]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return
__riscv_vsuxseg8ei32_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_i32mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_i32m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg8ei32_v_i64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_i64m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , 
, , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_u8mf8x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue 
{ , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u8mf4x8(uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_u8mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] 
= extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_u8mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t 
v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_u8m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_u16mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u16mf2x8(uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_u16mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_u16m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , 
, , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_u32mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// 
CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_u32m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg8ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_u64m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { + return 
__riscv_vsuxseg8ei32_v_f16mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_f16mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16m1x8_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_f16m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], 
[[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_f32mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } 
[[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f32m1x8_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_f32m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f64m1x8_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_f64m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_i8mf8x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, 
vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_i8mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_i8mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg8ei32_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_i8m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_i16mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_i16mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_i16m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , 
, , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_i32mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_i32m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return 
__riscv_vsuxseg8ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_i64m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_u8mf8x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_u8mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_u8mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_u8m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_u16mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: 
[[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_u16mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], 
[[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_u16m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void 
test_vsuxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_u32mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_u32m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32_v_u64m1x8_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei64.c @@ -7,523 +7,1355 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +//
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f16mf4x8(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_f16mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f16mf2x8(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_f16mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f16m1x8(_Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_f16m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f32mf2x8(float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_f32mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsuxseg8ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f32m1x8(float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_f32m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f64m1x8(double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_f64m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf8 -// CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_i8mf8x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_i8mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue 
{ , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_i8mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// 
CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_i8m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_i16mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_i16mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16m1 
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_i16m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i32mf2x8(int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_i32mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], 
[[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_i32m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// 
CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_i64m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], 
[[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_u8mf8x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_u8mf4x8(base, bindex, v_tuple, vl); 
} -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_u8mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], 
[[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_u8m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_u16mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: 
[[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_u16mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16> [[TMP9]], <vscale x 4 x i16> [[TMP10]], <vscale x 4 x i16> [[TMP11]], <vscale x 4 x i16> [[TMP12]], <vscale x 4 x i16> [[TMP13]], <vscale x 4 x i16> [[TMP14]], <vscale x 4 x i16> [[TMP15]], ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_u16m1x8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], <vscale x 1 x i32> [[V5:%.*]], <vscale x 1 x i32> [[V6:%.*]], <vscale x 1 x i32> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE6:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[V0]], <vscale x 1 x i32> [[V1]], <vscale x 1 x i32> [[V2]], <vscale x 1 x i32> [[V3]], <vscale x 1 x i32> [[V4]], <vscale x 1 x i32> [[V5]], <vscale x 1 x i32> [[V6]], <vscale x 1 x i32> [[V7]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], <vscale x 1 x i32> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], <vscale x 1 x i32> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], <vscale x 1 x i32> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP4]], <vscale x 1 x i32> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP5]], <vscale x 1 x i32> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], <vscale x 1 x i32> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32> [[TMP9]], <vscale x 1 x i32> [[TMP10]], <vscale x 1 x i32> [[TMP11]], <vscale x 1 x i32> [[TMP12]], <vscale x 1 x i32> [[TMP13]], <vscale x 1 x i32> [[TMP14]], <vscale x 1 x i32> [[TMP15]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t
bindex, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_u32mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_u32m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_u64m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_f16mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], 
[[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_f16mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , 
, , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_f16m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_f32mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - 
return __riscv_vsuxseg8ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f32m1x8_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_f32m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f64m1x8_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_f64m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_i8mf8x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_i8mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] 
= insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_i8mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_i8m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 
4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_i16mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei64_v_i16mf2x8_m(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16m1_m
-// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], <vscale x 4 x i16> [[V5:%.*]], <vscale x 4 x i16> [[V6:%.*]], <vscale x 4 x i16> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16m1x8_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE2:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE3:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE4:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE5:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE6:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[V0]], <vscale x 4 x i16> [[V1]], <vscale x 4 x i16> [[V2]], <vscale x 4 x i16> [[V3]], <vscale x 4 x i16> [[V4]], <vscale x 4 x i16> [[V5]], <vscale x 4 x i16> [[V6]], <vscale x 4 x i16> [[V7]], ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], <vscale x 4 x i16> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], <vscale x 4 x i16> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]], <vscale x 4 x i16> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP5]], <vscale x 4 x i16> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP6]], <vscale x 4 x i16> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16> [[TMP9]], <vscale x 4 x i16> [[TMP10]], <vscale x 4 x i16> [[TMP11]], <vscale x 4 x i16> [[TMP12]], <vscale x 4 x i16> [[TMP13]], <vscale x 4 x i16> [[TMP14]], <vscale x 4 x i16> [[TMP15]], ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t
vl) { + return __riscv_vsuxseg8ei64_v_i16m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_i32mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32m1x8_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_i32m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], 
[[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_i64m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_u8mf8x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// 
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_u8mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_u8mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, 
vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_u8m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_u16mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg8ei64_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_u16mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_u16m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , 
, , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_u32mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_u32m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// 
CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64_v_u64m1x8_m(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsuxseg8ei8.c @@ -7,523 +7,1355 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_f16mf4x8(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_f16mf4x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], 
[[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_f16mf2x8(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_f16mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_f16m1x8(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_f16m1x8(base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_f32mf2x8(float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_f32mf2x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], 
[[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_f32m1x8(float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_f32m1x8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_f64m1x8(double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_f64m1x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf8x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_i8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_i8mf8x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_i8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_i8mf4x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_i8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_i8mf2x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_i8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_i8m1x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_i16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_i16mf4x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_i16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_i16mf2x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_i16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_i16m1x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_i32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_i32mf2x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_i32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_i32m1x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i64m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_i64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_i64m1x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf8x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_u8mf8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_u8mf8x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_u8mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_u8mf4x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_u8mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_u8mf2x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_u8m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_u8m1x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_u16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_u16mf4x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_u16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_u16mf2x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_u16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_u16m1x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_u32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_u32mf2x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_u32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_u32m1x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u64m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_u64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_u64m1x8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf4x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_f16mf4x8_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_f16mf2x8_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_f16m1x8_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_f32mf2x8_m(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:
[[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_f32m1x8_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_f32m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], 
[[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_f64m1x8_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_f64m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// 
CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_i8mf8x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[TMP8]], 
[[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_i8mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void 
test_vsuxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_i8mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_i8m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg8ei8_v_i16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_i16mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( 
[[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_i16mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , 
, } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_i16m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = 
extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_i32mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: 
[[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_i32m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, 
vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_i64m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_u8mf8x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg8ei8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_u8mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_u8mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_u8m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 
5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_u16mf4x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// 
CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_u16mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_u16m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return 
__riscv_vsuxseg8ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_u32mf2x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8_v_u32m1x8_m(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], 
<vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], <vscale x 1 x i64> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u64m1x8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE6:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], <vscale x 1 x i64> [[V5]], <vscale x 1 x i64> [[V6]], <vscale x 1 x i64> [[V7]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], <vscale x 1 x i64> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], <vscale x 1 x i64> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei8_v_u64m1x8_m(mask, base, bindex, v_tuple, vl);
 }
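The churn in these hunks is mechanical: every autogenerated test now receives a single tuple value (vuint64m1x8_t, vfloat16mf4x2_t, ...) in place of the old v0..vN operand list, and the CHECK lines verify the insertvalue/extractvalue coercion that rebuilds the aggregate before the intrinsic call. A minimal caller-side sketch of the migrated API follows; the store intrinsic and types are taken verbatim from the tests in this patch, while the tuple-typed segment load __riscv_vlseg2e16_v_f16mf4x2 is assumed from the companion load migration and is illustrative only.

#include <riscv_vector.h>

// Load two interleaved _Float16 fields per element into one vfloat16mf4x2_t,
// then scatter both fields through a 16-bit index vector.
void scatter_f16_pairs(_Float16 *dst, const _Float16 *src,
                       vuint16mf4_t bindex, size_t avl) {
  size_t vl = __riscv_vsetvl_e16mf4(avl);
  vfloat16mf4x2_t v_tuple = __riscv_vlseg2e16_v_f16mf4x2(src, vl);  // assumed tuple-typed load
  __riscv_vsoxseg2ei16_v_f16mf4x2(dst, bindex, v_tuple, vl);        // tuple-typed store from this patch
}

Masked forms keep the mask as the leading argument, e.g. __riscv_vsuxseg8ei8_v_u64m1x8_m(mask, base, bindex, v_tuple, vl), as the hunk that just closed checks.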
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei16.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei16.c
@@ -7,963 +7,1347 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[TMP2]], <vscale x 1 x half> [[TMP3]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16mf4x2(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0]], <vscale x 2 x half> [[V1]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[TMP2]], <vscale x 2 x half> [[TMP3]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16mf2x2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0]], <vscale x 4 x half> [[V1]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[TMP2]], <vscale x 4 x half> [[TMP3]], ptr
[[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_f16m1x2(_Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_f16m2x2(_Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_f16m4x2(_Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_f32mf2x2(float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_f32m1x2(float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t 
vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_f32m2x2(float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_f32m4x2(float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_f64m1x2(double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_f64m2x2(double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_f64m4x2(double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, 
v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , 
} [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg2ei16_v_i16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void 
test_vsoxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg2ei16_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f16m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f32m1x2_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f32m2x2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f32m4x2_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f64m1x2_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f64m2x2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_f64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f64m4x2_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i8m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i16m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_i64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex,
vuint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u8m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return 
__riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[V0]], <vscale x 2 x i64> [[V1]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m4_m
-// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[V0:%.*]], <vscale x 4 x i64> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei16_v_u64m4x2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i64> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[V0]], <vscale x 4 x i64> [[V1]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } poison, <vscale x 4 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], <vscale x 4 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64> [[TMP3]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei32.c
@@ -7,923 +7,1291 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void 
test_vsoxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { 
, } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg2ei32_v_f32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg2ei32_v_i8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg2ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, 
vint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, 
size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { + return 
__riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, 
v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], 
[[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf8x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void 
test_vsoxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg2ei32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, 
vint64m1_t v0, vint64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf8x2_m 
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, 
base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei32(mask, base, 
bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei32(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei32_tuple.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei32_tuple.c deleted file mode 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei32_tuple.c +++ /dev/null @@ -1,1297 +0,0 @@ -// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 -// REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ -// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ -// RUN: FileCheck --check-prefix=CHECK-RV64 %s - -#include <riscv_vector.h> - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsoxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32mf2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i32.i64( [[TMP2]], 
[[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf8x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf8x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16mf2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f16m4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32mf2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f32m4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_f64m4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf8x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8mf2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i8m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16mf2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i16m4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32mf2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i32m4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_i64m4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf8x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8mf2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u8m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16mf2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u16m4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32mf2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u32m4x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m1x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m2x2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsoxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
- return __riscv_vsoxseg2ei32_tuple(mask,
base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_u64m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxseg2ei32_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsoxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei64.c @@ -7,823 +7,1151 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f16mf4x2(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], 
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_f16mf2x2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_f16m1x2(_Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_f16m2x2(_Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_f32mf2x2(float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_f32m1x2(float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_f32m2x2(float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_f32m4x2(float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_f64m1x2(double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_f64m2x2(double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_f64m4x2(double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf8x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf8x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u8m1x2(uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl); } 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_f32m1x2_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_f32m2x2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_f32m4x2_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_f64m1x2_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_f64m2x2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_f64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_f64m4x2_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i16m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) {
  + return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_i64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u16m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_u64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei64(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei64(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei8.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg2ei8.c
@@ -7,963 +7,1347 @@
 
 #include 
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
+void 
test_vsoxseg2ei8_v_f16mf4x2(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_f16mf2x2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_f16m1x2(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_f16m2x2(_Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_f16m4x2(_Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_f32mf2x2(float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg2ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_f32m1x2(float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_f32m2x2(float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , 
} [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_f32m4x2(float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_f64m1x2(double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_f64m2x2(double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m4x2 +// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_f64m4x2(double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, 
vint8mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg2ei8_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue 
{ , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m1x2 +// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t v0, 
vint32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i8.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg2ei8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i8.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i8.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue 
{ , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i8.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv32i8.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16i16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8i32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1i64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2i64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4i64.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f16m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f32m1x2_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f32m2x2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f32m4x2_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f64m1x2_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f64m2x2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_f64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f64m4x2_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i8m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i16m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_i64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u8m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
-  return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t
vl) { + return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: 
ret void // -void test_vsoxseg2ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_u64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], 
<vscale x 4 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64> [[TMP3]], ptr [[BASE]], <vscale x 4 x i8> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return __riscv_vsoxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg2ei8(mask, base, bindex, v_tuple, vl);
}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei16.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei16.c
@@ -7,743 +7,1187 @@
#include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[TMP3]], <vscale x 1 x half> [[TMP4]], <vscale x 1 x half> [[TMP5]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_f16mf4x3(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl);
}
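To make the new overloaded calling convention concrete, here is a minimal caller sketched only from the signatures exercised in these tests (built with the vector extensions enabled, plus zvfh for the _Float16 cases); the helper name store_seg3 is illustrative and not part of this patch:

#include <riscv_vector.h>

// Before this patch, each segment was a separate operand:
//   __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
// Now the three segments travel as one vfloat16mf4x3_t tuple value, and the
// overload is selected from the tuple type.
void store_seg3(_Float16 *base, vuint16mf4_t bindex,
                vfloat16mf4x3_t v_tuple, size_t vl) {
  __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl);
}

The tuple lowers to one scalable vector per field, which is why the generated IR above rebuilds the aggregate with insertvalue and then splits it back out with extractvalue before the intrinsic call.

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0]], <vscale x 2 x half> [[V1]], <vscale x 2 x half> [[V2]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } 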
[[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f16mf2x3(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f16m1x3(_Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f16m2x3(_Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f32mf2x3(float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t 
v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f32m1x3(float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f32m2x3(float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f64m1x3(double *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg3ei16_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_f64m2x3(double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } 
poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } 
[[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16m1(int16_t *base, 
vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf8x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f16m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_f32m1x3_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f32m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_f32m2x3_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_f64m1x3_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_f64m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_f64m2x3_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf8x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i8m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i16m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i32m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_i64m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf8x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]],
[[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u8m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei16_v_u64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei16(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei32.c @@ -7,743 +7,1187 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f16mf4x3(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f16mf2x3(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f16m1x3(_Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f16m2x3(_Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f32mf2x3(float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f32m1x3(float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f32m2x3(float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(base, 
bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f64m1x3(double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_f64m2x3(double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// 
CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf8x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f16m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_f32m1x3_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f32m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_f32m2x3_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_f64m1x3_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_f64m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_f64m2x3_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf8x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i8m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) {
  + return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i16m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i32m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_i64m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf8x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
-  return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf4_m
-// CHECK-RV64-SAME: (
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u8m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf4x3_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_u64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei32(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei64.c @@ -7,703 +7,1123 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_f16mf4x3(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_f16mf2x3(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_f16m1x3(_Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , 
, } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_f16m2x3(_Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_f32mf2x3(float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_f32m1x3(float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_f32m2x3(float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg3ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_f64m1x3(double *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_f64m2x3(double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return 
__riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8m1x3 +// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { + 
return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg3ei64_v_u8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], 
[[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i64.i64( [[TMP3]], [[TMP4]], 
[[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void 
test_vsoxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16mf2x3_m +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_f32m1x3_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_f32m2x3_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_f64m1x3_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_f64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_f64m2x3_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } 
[[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_i64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[TMP3]], 
[[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsoxseg3ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, 
vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, 
base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u16m2x3_m(vbool8_t mask, 
uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { + return 
__riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg3ei64_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_u64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei64(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg3ei8.c @@ -7,743 +7,1187 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return 
__riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f16mf4x3(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f16mf2x3(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f16m1x3(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m2 -// CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f16m2x3(_Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f32mf2x3(float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f32m1x3(float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f32m2x3(float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { 
, , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f64m1x3(double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f64m2x3(double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } 
[[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, 
v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i16mf4x3(int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } 
[[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8.i64( [[TMP3]], [[TMP4]], 
[[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, 
vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i8.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg3ei8_v_u8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i16.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i32.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, 
size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1i64.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg3ei8_v_u64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2i64.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f32m1x3_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f32m2x3_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f64m1x3_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_f64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f64m2x3_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i8m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[TMP3]], 
[[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg3ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, 
-void test_vsoxseg3ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i32m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_i64m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf8x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
- return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
- return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u8m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u16m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u32m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
- return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_u64m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
- return __riscv_vsoxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg3ei8(mask, base, bindex, v_tuple, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei16.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei16.c
@@ -7,963 +7,1347 @@
#include
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_f16mf4x4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_f16mf2x4(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_f16m1x4(_Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_f16m2x4(_Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_f32mf2x4(float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_f32m1x4(float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_f32m2x4(float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_f64m1x4(double *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_f64m2x4(double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf8x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_i8mf2x4(int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m1x4
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg4ei16_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void 
test_vsoxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8mf8(uint8_t 
*base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.nxv4i8.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u8m1x4(uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue 
{ , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16.i64( 
[[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, 
base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { 
, , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f32m1x4_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f32m2x4_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f64m1x4_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_f64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_f64m2x4_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf8_m -// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, 
vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , 
, , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32mf2_m 
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, 
vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_i64m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf8x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]],
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg4ei16_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16mf4_m(vbool64_t 
mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u32m2x4_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, 
vuint64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei16_v_u64m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei16(mask, base, bindex, v_tuple, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei32.c
@@ -7,743 +7,1335 @@
#include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_f16mf4x4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_f16mf2x4(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , }
[[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f16m1x4(_Float16 *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f16m2x4(_Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], 
[[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f32mf2x4(float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f32m1x4(float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f32m2x4(float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f64m1x4(double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_f64m2x4(double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf8x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i8mf4x4(int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i16mf4x4(int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf8x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u8mf4x4(uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] =
insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m1_m -// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f32m1x4_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, 
vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f32m2x4_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f64m1x4_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_f64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_f64m2x4_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg4ei32_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i16mf2_m(vbool32_t mask, 
int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } 
[[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_i64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, 
vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg4ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { 
, , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m1x4_m +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t 
vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// 
CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_u64m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei32(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei64.c
@@ -7,703 +7,1263 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_f16mf4x4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_f16mf2x4(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
 }

-//
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_f16m1x4(_Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(base, 
bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_f16m2x4(_Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_f32mf2x4(float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: 
ret void // -void test_vsoxseg4ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_f32m1x4(float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_f32m2x4(float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } 
[[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_f64m1x4(double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_f64m2x4(double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// 
CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+//
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl); } 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf8x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u16mf2x4(uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f16m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_f32m1x4_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f32m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_f32m2x4_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_f64m1x4_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_f64m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_f64m2x4_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf8x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i8m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i16m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i32m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_i64m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf8x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]],
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local 
void @test_vsoxseg4ei64_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u16m2_m(vbool8_t mask, 
uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } 
poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m2_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], <vscale x 2 x i64> [[V2:%.*]], <vscale x 2 x i64> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_u64m2x4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[V0]], <vscale x 2 x i64> [[V1]], <vscale x 2 x i64> [[V2]], <vscale x 2 x i64> [[V3]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], <vscale x 2 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], <vscale x 2 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64> [[TMP5]], <vscale x 2 x i64> [[TMP6]], <vscale x 2 x i64> [[TMP7]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei64(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei8.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg4ei8.c
@@ -7,743 +7,1335 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], <vscale x 1 x half> [[V3]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[TMP4]], <vscale x 1 x half> [[TMP5]], <vscale x 1 x half> [[TMP6]], <vscale x 1 x half> [[TMP7]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f16mf4x4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0]], <vscale x 2 x half> [[V1]], <vscale x 2 x half> [[V2]], <vscale x 2 x half> [[V3]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], <vscale x 2 x half> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[TMP4]], <vscale x 2 x half> [[TMP5]], <vscale x 2 x half> [[TMP6]], <vscale x 2 x half> [[TMP7]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei8_v_f16mf2(_Float16 *base,
vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_f16mf2x4(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_f16m1x4(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg4.nxv8f16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_f16m2x4(_Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_f32mf2x4(float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } 
[[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_f32m1x4(float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_f32m2x4(float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , 
} [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_f64m1x4(double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_f64m2x4(double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf8x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i8.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i8.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i8.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i8.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv16i8.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i16.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i16.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8i16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4i32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1i64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2i64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f16m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f32m1x4_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f32m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f32m2x4_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f64m1x4_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_f64m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f64m2x4_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf8x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i8m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
- return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , }
[[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m1_m -// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t 
v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_i64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf8x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u8m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u16m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u32m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_u64m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
-  return __riscv_vsoxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg4ei8(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei16.c
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei16.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei16.c
@@ -7,523 +7,1043 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_f16mf4x5(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_f16mf2x5(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_f16m1x5(_Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_f32mf2x5(float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_f32m1x5(float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f64m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_f64m1x5(double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf8x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]],
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, 
vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, 
vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], 
[[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u8mf8x5(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t 
vl) { - return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, 
vuint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , 
, } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_f32m1x5_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_f64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_f64m1x5_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t 
bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf4_m 
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_i64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { 
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16mf2x5_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[V0]], 
[[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei16_v_u64m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return __riscv_vsoxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei16(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei32.c
@@ -7,523 +7,1043 @@
 #include
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei32_v_f16mf4x5(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei32_v_f16mf2x5(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei32_v_f16m1x5(_Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei32(base, bindex, v_tuple,
vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f32mf2x5(float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f32m1x5(float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f64m1x5(double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i8mf2x5(int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , 
, } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , 
, , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i16m1x5(int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32mf2 
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: 
[[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f16mf2x5_m(vbool32_t mask, _Float16 
*base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { 
, , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f32m1x5_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_f64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_f64m1x5_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = 
extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t 
vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } 
[[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_i64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg5ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return 
__riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg5ei32_v_u32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_u64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei32_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei32(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei64.c
@@ -7,523 +7,1043 @@
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], <vscale x 1 x half> [[V3]], <vscale x 1 x half> [[V4]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], <vscale x 1 x half> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[TMP5]], <vscale x 1 x half> [[TMP6]], <vscale x 1 x half> [[TMP7]], <vscale x 1 x half> [[TMP8]], <vscale x 1 x half> [[TMP9]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
-  return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_f16mf4x5(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf2x5
+// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_f16mf2x5(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg5ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_f16m1x5(_Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_f32mf2x5(float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_f32m1x5(float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_f64m1x5(double *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf8x5 +// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg5ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i64m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf8x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u64m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf4x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f16m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f32m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_f32m1x5_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_f64m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_f64m1x5_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf8x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf4x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i8m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf4x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i16m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i32m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_i64m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf8_m
-// CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { 
, , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg5ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, 
size_t vl) { + return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_u64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei64_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei64(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg5ei8.c @@ -7,523 +7,1043 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f16mf4x5(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, 
bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f16mf2x5(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f16m1x5(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f32mf2x5(float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f32m1x5(float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f64m1x5(double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } 
[[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } 
[[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i32m1x5(int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i8.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i8.nxv2i8.i64( [[TMP5]], 
[[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u8mf2x5(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , 
, , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg5ei8_v_u16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2i32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1i64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg5ei8_v_f16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t 
v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f32m1x5_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_f64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f64m1x5_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 
2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = 
extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, 
vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } 
[[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_i64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, 
vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg5ei8_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_u64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , 
, , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vsoxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsoxseg5ei8(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei16.c @@ -7,523 +7,1147 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4,
v5, vl); +void test_vsoxseg6ei16_v_f16mf4x6(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_f16mf2x6(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_f16m1x6(_Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t 
v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_f32mf2x6(float *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_f32m1x6(float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 
0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_f64m1x6(double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg6ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], 
[[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16.i64( [[TMP6]], 
[[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i64m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf8x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf4x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u8mf4x6(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf4x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u64m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f16m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f32m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_f32m1x6_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_f64m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_f64m1x6_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf8x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i8m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i16m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i32m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_i64m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf8x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void 
test_vsoxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { 
, , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei16_v_u64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { 
, , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei16_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei16(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei32.c @@ -7,523 +7,1147 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f16mf4(_Float16 *base, 
vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_f16mf4x6(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_f16mf2x6(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_f16m1x6(_Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], 
[[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_f32mf2x6(float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_f32m1x6(float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_f64m1x6(double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = 
extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i16mf4x6(int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], 
[[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = 
extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: 
[[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf4x6 +// CHECK-RV64-SAME: 
(ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg6ei32_v_u16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], 
[[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg6ei32_v_f16mf4_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf4x6_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], <vscale x 1 x half> [[V3]], <vscale x 1 x half> [[V4]], <vscale x 1 x half> [[V5]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], <vscale x 1 x half> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], <vscale x 1 x half> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[TMP6]], <vscale x 1 x half> [[TMP7]], <vscale x 1 x half> [[TMP8]], <vscale x 1 x half> [[TMP9]], <vscale x 1 x half> [[TMP10]], <vscale x 1 x half> [[TMP11]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
-  return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf2_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16mf2x6_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE3:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE4:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0]], <vscale x 2 x half> [[V1]], <vscale x 2 x half> [[V2]], <vscale x 2 x half> [[V3]], <vscale x 2 x half> [[V4]], <vscale x 2 x half> [[V5]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half>
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f16m1_m(vbool16_t mask, 
_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_f32m1x6_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_f64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , 
, , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_f64m1x6_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } 
[[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void 
test_vsoxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , 
} poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[TMP6]], 
[[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_i64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { 
, , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg6ei32_v_u8mf4_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf4x6_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE3:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE4:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[V0]], <vscale x 2 x i8> [[V1]], <vscale x 2 x i8> [[V2]], <vscale x 2 x i8> [[V3]], <vscale x 2 x i8> [[V4]], <vscale x 2 x i8> [[V5]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], <vscale x 2 x i8> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP4]], <vscale x 2 x i8> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8> [[TMP9]], <vscale x 2 x i8> [[TMP10]], <vscale x 2 x i8> [[TMP11]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf2_m
-// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], <vscale x 4 x i8> [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8mf2x6_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE3:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE4:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[V0]], <vscale x 4 x i8> [[V1]], <vscale x 4 x i8> [[V2]], <vscale x 4 x i8> [[V3]], <vscale x 4 x i8> [[V4]], <vscale x 4 x i8> [[V5]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], <vscale x 4 x i8> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP4]], <vscale x 4 x i8> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8> [[TMP9]], <vscale x 4 x i8> [[TMP10]], <vscale x 4 x i8> [[TMP11]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8m1_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u8m1x6_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE3:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE4:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[V0]], <vscale x 8 x i8> [[V1]], <vscale x 8 x i8> [[V2]], <vscale x 8 x i8> [[V3]], <vscale x 8 x i8> [[V4]], <vscale x 8 x i8> [[V5]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], <vscale x 8 x i8> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP4]], <vscale x 8 x i8> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8> [[TMP9]], <vscale x 8 x i8> [[TMP10]], <vscale x 8 x i8> [[TMP11]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0,
vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: 
[[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_u64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], 
[[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei32_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei32(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei64.c @@ -7,523 +7,1147 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i64.i64( [[TMP6]], 
[[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_f16mf4x6(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_f16mf2x6(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_f16m1x6(_Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = 
extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_f32mf2x6(float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_f32m1x6(float *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_f64m1x6(double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// 
CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf4x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i64m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf8x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf4x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u8mf4x6(uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf4x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u64m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f16m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f32m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_f32m1x6_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_f64m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_f64m1x6_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf8x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
[[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { 
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i16m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i32m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_i64m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf8x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u8m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u16m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u32m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_u64m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei8.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg6ei8.c
@@ -7,523 +7,1147 @@
 #include
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf4x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f16mf4x6(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f16mf2x6(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f16m1x6(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f32mf2x6(float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f32m1x6(float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f64m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f64m1x6(double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf8x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf4x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+//
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf2x6 +// CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = 
extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg6ei8_v_i32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i8.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i8.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i8.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv8i8.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_u16mf4x6(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4i16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , 
, , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2i32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_u32m1x6(uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1i64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return __riscv_vsoxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg6ei8_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_f16m1_m(vbool16_t mask, _Float16 
*base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f32m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f32m1x6_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_f64m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f64m1x6_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf8x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i8m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i16m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i32m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_i64m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf8x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u8m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u16m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u32m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_u64m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
- return __riscv_vsoxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei16.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei16.c
@@ -7,523 +7,1251 @@
 #include 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f16mf4x7(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f16mf2x7(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f16m1x7(_Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f32mf2x7(float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f32m1x7(float *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f64m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f64m1x7(double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]],
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , 
, , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i8mf2x7(int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = 
extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i16m1x7(int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: 
[[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf2 -// CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 
4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg7ei16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u64m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf4x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f16m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f32m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f32m1x7_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_f64m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f64m1x7_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf8x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf4x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i8m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf4x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i16m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i32m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_i64m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf8x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf4x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u8m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf4x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u16m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3,
v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei16_v_u64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , 
} [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei16(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei32.c
@@ -7,523 +7,1251 @@
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]],
1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f16mf4x7(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, 
v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f16mf2x7(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f16m1x7(_Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f32mf2x7(float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , 
, , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f32m1x7(float *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, 
vfloat64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f64m1x7(double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i8mf2x7(int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t 
v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, 
vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i32mf2x7(int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i64m1x7(int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], 
[[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, 
vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u8mf4x7(uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], 
[[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u8m1x7(uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , 
, , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, 
vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u16mf2x7(uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], 
[[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsoxseg7ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_u64m1x7(uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg7ei32_v_f16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = 
extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return 
__riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f32m1x7_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_f64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_f64m1x7_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , 
, , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf4_m -// CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t 
-void test_vsoxseg7ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i32m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_i64m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf8x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
- return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf4x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
- return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
- return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u8m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf4x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u16m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u32m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_u64m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei32_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei32(mask, base, bindex, v_tuple, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei64.c
@@ -7,523 +7,1251 @@
#include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_f16mf4x7(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_f16mf2x7(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_f16m1x7(_Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_f32mf2x7(float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_f32m1x7(float *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f64m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_f64m1x7(double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf8x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
- return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
- return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
- return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
- return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
- return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , }
[[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i64m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf8x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , }
[[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , 
, , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u16m1x7(uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// 
CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf4x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f16m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]],
[[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: 
ret void // -void test_vsoxseg7ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_f32m1x7_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_f64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_f64m1x7_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void 
test_vsoxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], 
[[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i64m1_m -// CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_i64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = 
extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t 
v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// 
CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t 
bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_u64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei64_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei64(mask, base, bindex, v_tuple, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei8.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg7ei8.c
@@ -7,523 +7,1251 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+//
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_f16mf4x7(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_f16mf2x7(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_f16m1x7(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg7ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_f32mf2x7(float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_f32m1x7(float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// 
CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_f64m1x7(double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg7ei8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { 
, , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf2 -// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } 
[[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i64m1 -// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i8.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i8.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i8.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8m1 -// CHECK-RV64-SAME: 
(ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv8i8.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1i16.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// 
CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2i16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4i16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32mf2 -// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], <vscale x 1 x i32> [[V5:%.*]], <vscale x 1 x i32> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[V0]], <vscale x 1 x i32> [[V1]], <vscale x 1 x i32> [[V2]], <vscale x 1 x i32> [[V3]], <vscale x 1 x i32> [[V4]], <vscale x 1 x i32> [[V5]], <vscale x 1 x i32> [[V6]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], <vscale x 1 x i32> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], <vscale x 1 x i32> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], <vscale x 1 x i32> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP4]], <vscale x 1 x i32> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP5]], <vscale x 1 x i32> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32> [[TMP9]], <vscale x 1 x i32> [[TMP10]], <vscale x 1 x i32> [[TMP11]], <vscale x 1 x i32> [[TMP12]], <vscale x 1 x i32> [[TMP13]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], <vscale x 2 x i32> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE3:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE4:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE5:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[V0]], <vscale x 2 x i32> [[V1]], <vscale x 2 x i32> [[V2]], <vscale x 2 x i32> [[V3]], <vscale x 2 x i32> [[V4]], <vscale x 2 x i32> [[V5]], <vscale x 2 x i32> [[V6]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], <vscale x 2 x i32> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], <vscale x 2 x i32> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], <vscale x 2 x i32> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]], <vscale x 2 x i32> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP5]], <vscale x 2 x i32> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32> [[TMP9]], <vscale x 2 x i32> [[TMP10]], <vscale x 2 x i32> [[TMP11]], <vscale x 2 x i32> [[TMP12]], <vscale x 2 x i32> [[TMP13]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u64m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], <vscale x 1 x i64> [[V5]], <vscale x 1 x i64> [[V6]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], <vscale x 1 x i64> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg7ei8(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf4_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf4x7_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], <vscale x 1 x half> [[V3]], <vscale x 1 x half> [[V4]], <vscale x 1 x half> [[V5]], <vscale x 1 x half> [[V6]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], <vscale x 1 x half> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], <vscale x 1 x half> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], <vscale x 1 x half> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 0
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 2
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 3
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 4
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 5
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 6
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[TMP7]], <vscale x 1 x half> [[TMP8]], <vscale x 1 x half> [[TMP9]], <vscale x 1 x half> [[TMP10]], <vscale x 1 x half> [[TMP11]], <vscale x 1 x half> [[TMP12]], <vscale x 1 x half> [[TMP13]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
-  return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
+ 
return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_f32m1x7_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_f64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_f64m1x7_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } 
[[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = 
extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void 
test_vsoxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { 
, , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[TMP7]], [[TMP8]], 
[[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_i64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return 
__riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , 
, } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg7ei8_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_u64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return __riscv_vsoxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg7ei8(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei16.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei16.c
@@ -7,523 +7,1355 @@
 #include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:
[[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_f16mf4x8(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: 
[[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_f16mf2x8(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.nxv4f16.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_f16m1x8(_Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_f32mf2x8(float *base, vuint16mf4_t bindex, 
vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_f32m1x8(float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_f64m1x8(double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = 
extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]],
[[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// 
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.nxv2i32.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i64m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl);
}
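// Editorial sketch (not part of the autogenerated checks): with the tuple
// types, a caller passes one vint64m1x8_t instead of eight vint64m1_t
// values. The tuple-typed indexed load used below,
// __riscv_vloxseg8ei16_v_i64m1x8, is an assumption carried over from the
// companion load-side patches, and the helper itself is illustrative only.
#include <riscv_vector.h>

void copy_seg8_i64(int64_t *dst, const int64_t *src, vuint16mf4_t bindex,
                   size_t vl) {
  // Gather eight segments into a single tuple value.
  vint64m1x8_t v_tuple = __riscv_vloxseg8ei16_v_i64m1x8(src, bindex, vl);
  // Scatter them back out; the overload resolves on the tuple type.
  __riscv_vsoxseg8ei16(dst, bindex, v_tuple, vl);
}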
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf8x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]],
[[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// 
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.nxv1i16.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, 
vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei16(base, bindex, v_tuple, vl);
}
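// Editorial sketch (not part of the autogenerated checks): the masked tests
// below differ only in the leading vbool operand; the tuple argument is
// unchanged, and segments whose mask bit is clear are simply not written.
// The helper name is hypothetical; the call matches the first masked test:
void masked_scatter_f16(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex,
                        vfloat16mf4x8_t v_tuple, size_t vl) {
  // Overload resolution picks the masked tuple variant from the mask operand.
  __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl);
}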
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf4x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2,
vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32mf2_m -// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_f32m1x8_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_f64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_f64m1x8_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl);
}
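// Editorial sketch (not part of the autogenerated checks): single segments
// can still be edited before the store. The tuple accessors assumed here,
// __riscv_vget_v_f64m1x8_f64m1 and __riscv_vset_v_f64m1_f64m1x8, come from
// the tuple-type support in the same series; the helper is illustrative only.
vfloat64m1x8_t negate_segment0(vfloat64m1x8_t v_tuple, size_t vl) {
  vfloat64m1_t v0 = __riscv_vget_v_f64m1x8_f64m1(v_tuple, 0); // pull out field 0
  v0 = __riscv_vfneg_v_f64m1(v0, vl);                         // negate it
  return __riscv_vset_v_f64m1_f64m1x8(v_tuple, 0, v0);        // write it back
}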
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf8x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
- return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf4x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , ,
} [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return 
__riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: 
[[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_i64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v0, 
v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 
5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, 
vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define 
dso_local void @test_vsoxseg8ei16_v_u64m1_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], <vscale x 1 x i64> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei16_v_u64m1x8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE6:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], <vscale x 1 x i64> [[V5]], <vscale x 1 x i64> [[V6]], <vscale x 1 x i64> [[V7]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], <vscale x 1 x i64> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], <vscale x 1 x i64> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 0
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 1
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 2
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 3
+// CHECK-RV64-NEXT:    [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 4
+// CHECK-RV64-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 5
+// CHECK-RV64-NEXT:    [[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 6
+// CHECK-RV64-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 7
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
-  return __riscv_vsoxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsoxseg8ei16(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei32.c
@@ -7,523 +7,1355 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf4
-//
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_f16mf4x8(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_f16mf2x8(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } 
[[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_f16m1x8(_Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_f32mf2x8(float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i32.i64( [[TMP8]], [[TMP9]], 
[[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_f32m1x8(float *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_f64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_f64m1x8(double *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(base, bindex, v_tuple, 
vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i8mf8x8(int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], 
[[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , 
, , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// 
CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], 
[[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i16mf4x8(int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg8ei32_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_i64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , 
, , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u8mf4x8(uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf2 -// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , 
[The same hunk repeats for every remaining configuration in this file, with only the element, index, and mask types changing. Each hunk renames test_vsoxseg8ei32_v_<ty> to test_vsoxseg8ei32_v_<ty>x8, collapses the eight vector parameters v0..v7 into a single v<ty>x8_t v_tuple, and replaces the one-line intrinsic call with the insertvalue/extractvalue coercion sequence shown in the hunk above. In file order: u16mf2x8 (nxv2i16), u16m1x8 (nxv4i16), u32mf2x8 (nxv1i32), u32m1x8 (nxv2i32), u64m1x8 (nxv1i64), then the masked _m variants f16mf4x8 (nxv1f16), f16mf2x8 (nxv2f16), f16m1x8 (nxv4f16), f32mf2x8 (nxv1f32), f32m1x8 (nxv2f32), f64m1x8 (nxv1f64), i8mf8x8 (nxv1i8), i8mf4x8 (nxv2i8), i8mf2x8 (nxv4i8), i8m1x8 (nxv8i8), i16mf4x8 (nxv1i16), i16mf2x8 (nxv2i16), i16m1x8 (nxv4i16), i32mf2x8 (nxv1i32), i32m1x8 (nxv2i32), i64m1x8 (nxv1i64), u8mf8x8 (nxv1i8), and u8mf4x8 (nxv2i8), the masked ones lowering through the corresponding @llvm.riscv.vsoxseg8.mask.* intrinsics.]
[[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, 
vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , 
, , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_u64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei32_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei32(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei64.c @@ -7,523 +7,1355 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, 
vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_f16mf4x8(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_f16mf2x8(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_f16m1x8(_Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i64.i64( 
[[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_f32mf2x8(float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], 
[[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_f32m1x8(float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = 
extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_f64m1x8(double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsoxseg8ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], 
[[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , 
, , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = 
extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, 
vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i32mf2x8(int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// 
CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, 
vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: 
define dso_local void @test_vsoxseg8ei64_v_u16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// 
CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, 
vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , 
, , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: 
[[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_f32m1x8_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_f64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg8ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_f64m1x8_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); 
} -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { 
, , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], 
[[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, 
vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_i64m1x8_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , 
, } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 
1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return 
__riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[V0]], <vscale x 1 x i32> [[V1]], <vscale x 1 x i32> [[V2]], <vscale x 1 x i32> [[V3]], <vscale x 1 x i32> [[V4]], <vscale x 1 x i32> [[V5]], <vscale x 1 x i32> [[V6]], <vscale x 1 x i32> [[V7]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], <vscale x 1 x i32> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], <vscale x 1 x i32> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], <vscale x 1 x i32> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP4]], <vscale x 1 x i32> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP5]], <vscale x 1 x i32> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], <vscale x 1 x i32> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32> [[TMP9]], <vscale x 1 x i32> [[TMP10]], <vscale x 1 x i32> [[TMP11]], <vscale x 1 x i32> [[TMP12]], <vscale x 1 x i32> [[TMP13]], <vscale x 1 x i32> [[TMP14]], <vscale x 1 x i32> [[TMP15]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32m1_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], <vscale x 2 x i32> [[V6:%.*]], <vscale x 2 x i32> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u32m1x8_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE3:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE4:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE5:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE6:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[V0]], <vscale x 2 x i32> [[V1]], <vscale x 2 x i32> [[V2]], <vscale x 2 x i32> [[V3]], <vscale x 2 x i32> [[V4]], <vscale x 2 x i32> [[V5]], <vscale x 2 x i32> [[V6]], <vscale x 2 x i32> [[V7]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], <vscale x 2 x i32> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], <vscale x 2 x i32> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], <vscale x 2 x i32> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]], <vscale x 2 x i32> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP5]], <vscale x 2 x i32> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], <vscale x 2 x i32> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32> [[TMP9]], <vscale x 2 x i32> [[TMP10]], <vscale x 2 x i32> [[TMP11]], <vscale x 2 x i32> [[TMP12]], <vscale x 2 x i32> [[TMP13]], <vscale x 2 x i32> [[TMP14]], <vscale x 2 x i32> [[TMP15]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u64m1_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], <vscale x 1 x i64> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_u64m1x8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE6:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], <vscale x 1 x i64> [[V5]], <vscale x 1 x i64> [[V6]], <vscale x 1 x i64> [[V7]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], <vscale x 1 x i64> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], <vscale x 1 x i64> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei64_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl);
 }
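At the source level the masked forms change exactly like the unmasked ones: the eight discrete vector operands collapse into a single tuple argument. A minimal caller-side sketch of the migration follows; the tuple-returning segment load and the `src` pointer are illustrative assumptions, not part of this patch:

    // Before this change: eight separate field registers.
    //   __riscv_vsoxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
    // After: the eight fields travel as one vuint32mf2x8_t tuple value.
    vuint32mf2x8_t v_tuple = __riscv_vlseg8e32_v_u32mf2x8(src, vl); // assumed tuple-returning segment load
    __riscv_vsoxseg8ei64(mask, base, bindex, v_tuple, vl);          // masked indexed-ordered segment store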
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei8.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsoxseg8ei8.c
@@ -7,523 +7,1355 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE6:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], <vscale x 1 x half> [[V3]], <vscale x 1 x half> [[V4]], <vscale x 1 x half> [[V5]], <vscale x 1 x half> [[V6]], <vscale x 1 x half> [[V7]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], <vscale x 1 x half> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], <vscale x 1 x half> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], <vscale x 1 x half> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], <vscale x 1 x half> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 7
+//
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_f16mf2x8(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_f16m1x8(_Float16 *base, vuint8mf2_t 
bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_f32mf2x8(float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_f32m1x8(float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_f64m1x8(double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = 
extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return 
__riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], 
[[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], 
[[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], <vscale x 2 x i32> [[V6:%.*]], <vscale x 2 x i32> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE3:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE4:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE5:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE6:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[V0]], <vscale x 2 x i32> [[V1]], <vscale x 2 x i32> [[V2]], <vscale x 2 x i32> [[V3]], <vscale x 2 x i32> [[V4]], <vscale x 2 x i32> [[V5]], <vscale x 2 x i32> [[V6]], <vscale x 2 x i32> [[V7]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], <vscale x 2 x i32> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], <vscale x 2 x i32> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], <vscale x 2 x i32> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]], <vscale x 2 x i32> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP5]], <vscale x 2 x i32> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], <vscale x 2 x i32> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32> [[TMP9]], <vscale x 2 x i32> [[TMP10]], <vscale x 2 x i32> [[TMP11]], <vscale x 2 x i32> [[TMP12]], <vscale x 2 x i32> [[TMP13]], <vscale x 2 x i32> [[TMP14]], <vscale x 2 x i32> [[TMP15]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl);
 }
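The overloaded entry point keeps its name; the tuple type of the argument is what now selects the x8 overload. A sketch for the i32m1 case, assuming the tuple-returning segment load and the tuple vget accessor from the same tuple-type work are available (`src` is a hypothetical source pointer):

    vint32m1x8_t v_tuple = __riscv_vlseg8e32_v_i32m1x8(src, vl); // assumed: load eight fields as one tuple
    vint32m1_t f0 = __riscv_vget_v_i32m1x8_i32m1(v_tuple, 0);    // assumed accessor: peel out field 0
    __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl);              // tuple argument picks the x8 overload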
-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], <vscale x 1 x i64> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i64m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE6:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], <vscale x 1 x i64> [[V5]], <vscale x 1 x i64> [[V6]], <vscale x 1 x i64> [[V7]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], <vscale x 1 x i64> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], <vscale x 1 x i64> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], <vscale x 1 x i64> [[TMP14]], <vscale x 1 x i64> [[TMP15]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], <vscale x 1 x i8> [[V6:%.*]], <vscale x 1 x i8> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf8x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE6:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT:
entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i8.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i8.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv8i8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT:
ret void // -void test_vsoxseg8ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4i16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } 
[[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: 
[[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2i32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1i64.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void 
// -void test_vsoxseg8ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , 
, , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_f32m1x8_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_f64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_f64m1x8_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( 
[[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void 
test_vsoxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsoxseg8ei8_v_i8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i16m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i32m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_i64m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf8x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf4x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u8m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf4x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u16m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u32m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_u64m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return __riscv_vsoxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsoxseg8ei8(mask, base, bindex, v_tuple, vl);
}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei16.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei16.c
@@ -7,963 +7,1347 @@

#include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f16mf4x2(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f16mf2x2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f16m1x2(_Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f16m2x2(_Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f16m4x2(_Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f32mf2x2(float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f32m1x2(float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f32m2x2(float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f32m4x2(float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f64m1x2(double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f64m2x2(double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f64m4x2(double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf8x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i8mf8x2(int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i8mf4x2(int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i8mf2x2(int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i8m1x2(int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i8m2x2(int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i8m4x2(int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0,
vint16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i16mf4x2(int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i16mf2x2(int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i16m1x2(int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i16m2x2(int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i16m4x2(int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i32mf2x2(int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) { + return 
__riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i32m1x2(int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i32m2x2(int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i32m4x2(int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i64m1x2(int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i64m2x2(int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i64m4x2(int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u8mf8x2(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u8mf4x2(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u8mf2x2(uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u8m1x2(uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u8m2x2(uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u8m4x2(uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, 
vl); +void test_vsuxseg2ei16_v_u16mf4x2(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u16mf2x2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u16m1x2(uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u16m2x2(uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16m4(uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u16m4x2(uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u32mf2x2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u32m1x2(uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u32m2x2(uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , 
} [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u32m4x2(uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u64m1x2(uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u64m2x2(uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u64m4x2(uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { 
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f16m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f32mf2x2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f32m1x2_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f32m2x2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f32m4x2_m(vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f64m1x2_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f64m2x2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_f64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f64m4x2_m(vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i8m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i16m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_i64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u8m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u16m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei16_v_u64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
 }
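The hunks above and below apply one mechanical signature change: every vsuxseg/vsoxseg test now passes a single vector-tuple value instead of two separate vector operands, and the autogenerated CHECK lines gain the insertvalue/extractvalue sequence Clang emits when it coerces the tuple argument back from its per-register pieces. A minimal user-side sketch of the migration (illustrative only, not part of the patch; the hypothetical helper store_pair reuses the i16m1x2 types exercised by the tests above):

#include <riscv_vector.h>

/* Before this change each segment field was its own operand:
 *   __riscv_vsuxseg2ei16(mask, base, bindex, v0, v1, vl);
 * After it, the two fields travel together as one tuple value. */
void store_pair(vbool16_t mask, int16_t *base, vuint16m1_t bindex,
                vint16m1x2_t v_tuple, size_t vl) {
  __riscv_vsuxseg2ei16(mask, base, bindex, v_tuple, vl);
}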
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei32.c
@@ -7,923 +7,1291 @@
 #include 

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]])
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, 
v1, vl); +void test_vsuxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg2ei32_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m2x2 
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg2ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, 
vuint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } 
[[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg2ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+//
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei32(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei32(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei32_tuple.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei32_tuple.c deleted file mode 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei32_tuple.c +++ /dev/null @@ -1,1297 +0,0 @@ -// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2 -// REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ -// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ -// RUN: FileCheck --check-prefix=CHECK-RV64 %s - -#include - -// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f16mf4x2(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f16mf2x2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f16m1x2(_Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f16m2x2(_Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f16m4x2(_Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32mf2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f32mf2x2(float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f32m1x2(float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f32m2x2(float *base, vuint32m2_t bindex, 
vfloat32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f32m4x2(float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f64m1x2(double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_f64m2x2(double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsuxseg2ei32_v_f64m4x2(double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf8x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i8mf8x2(int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i8mf4x2(int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i8mf2x2(int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// 
CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i8m1x2(int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i8m2x2(int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16mf4x2(int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16mf2x2(int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16m1x2(int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16m2x2(int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16m4x2(int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32mf2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i32mf2x2(int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i32m1x2(int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i32m2x2(int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m4x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i32m4x2(int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m1x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i64m1x2(int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m2x2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_i64m2x2(int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i64> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } poison, <vscale x 4 x i64> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], <vscale x 4 x i64> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64> [[TMP3]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_i64m4x2(int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf8x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8> [[TMP3]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_u8mf8x2(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8> [[TMP3]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_u8mf4x2(uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8> [[TMP3]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_u8mf2x2(uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8> [[TMP3]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_u8m1x2(uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8> [[TMP3]], ptr [[BASE]], <vscale x 16 x i32> [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_u8m2x2(uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16> [[TMP3]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_u16mf4x2(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16> [[TMP3]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_u16mf2x2(uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16> [[TMP3]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_u16m1x2(uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_u16m2x2(uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 16 x i16> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } poison, <vscale x 16 x i16> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], <vscale x 16 x i16> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16> [[TMP3]], ptr [[BASE]], <vscale x 16 x i32> [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_u16m4x2(uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32mf2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_u32mf2x2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_u32m1x2(uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_u32m2x2(uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_u32m4x2(uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m1x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64> [[TMP3]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_u64m1x2(uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m2x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_u64m2x2(uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m4x2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i64> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } poison, <vscale x 4 x i64> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], <vscale x 4 x i64> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64> [[TMP3]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_u64m4x2(uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf4x2_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[TMP2]], <vscale x 1 x half> [[TMP3]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16mf2x2_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[TMP2]], <vscale x 2 x half> [[TMP3]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m1x2_m
-// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[TMP2]], <vscale x 4 x half> [[TMP3]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m2x2_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half> } poison, <vscale x 8 x half> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], <vscale x 8 x half> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[TMP2]], <vscale x 8 x half> [[TMP3]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f16m4x2_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 16 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x half>, <vscale x 16 x half> } poison, <vscale x 16 x half> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], <vscale x 16 x half> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[TMP2]], <vscale x 16 x half> [[TMP3]], ptr [[BASE]], <vscale x 16 x i32> [[BINDEX]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32mf2x2_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[TMP2]], <vscale x 1 x float> [[TMP3]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_f32mf2x2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m1x2_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[TMP2]], <vscale x 2 x float> [[TMP3]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_f32m1x2_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m2x2_m
-// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } poison, <vscale x 4 x float> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], <vscale x 4 x float> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_f32m2x2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f32m4x2_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } poison, <vscale x 8 x float> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], <vscale x 8 x float> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[TMP2]], <vscale x 8 x float> [[TMP3]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_f32m4x2_m(vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m1x2_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[TMP2]], <vscale x 1 x double> [[TMP3]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_f64m1x2_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m2x2_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x double> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x double> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double> } poison, <vscale x 2 x double> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], <vscale x 2 x double> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[TMP2]], <vscale x 2 x double> [[TMP3]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_f64m2x2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_f64m4x2_m
-// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x double> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x double> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x double>, <vscale x 4 x double> } poison, <vscale x 4 x double> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], <vscale x 4 x double> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[TMP2]], <vscale x 4 x double> [[TMP3]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_f64m4x2_m(vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf8x2_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8> [[TMP3]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf4x2_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8> [[TMP3]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8mf2x2_m
-// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8> [[TMP3]], ptr [[BASE]], <vscale x 4 x i32> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m1x2_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[V_TUPLE_COERCE0]], 0
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[V_TUPLE_COERCE1]], 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], 1
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8> [[TMP3]], ptr [[BASE]], <vscale x 8 x i32> [[BINDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsuxseg2ei32_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x2_t v_tuple, size_t vl) {
-  return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i8m2x2_m
-// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE1:%.*]],
i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16mf2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x2_t v_tuple, size_t vl) { - return 
__riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i16m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32mf2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i32m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 
0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_i64m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf8x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8mf2x2_m -// CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u8m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u16mf4x2_m(vbool64_t 
mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16mf2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u16m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32mf2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u32m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m1x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m2x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_u64m4x2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxseg2ei32_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { - return __riscv_vsuxseg2ei32_tuple(mask, base, bindex, v_tuple, vl); -} - 
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei64.c
@@ -7,823 +7,1151 @@
 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[TMP2]], <vscale x 1 x half> [[TMP3]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f16mf4x2(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[V0]], <vscale x 2 x half> [[V1]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[TMP2]], <vscale x 2 x half> [[TMP3]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f16mf2x2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[V0]], <vscale x 4 x half> [[V1]], ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[TMP2]], <vscale x 4 x half> [[TMP3]], ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f16m1x2(_Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x half> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[V0]], <vscale x 8 x half> [[V1]], ptr [[BASE]], <vscale x 8 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half> } poison, <vscale x 8 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], <vscale x 8 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[TMP2]], <vscale x 8 x half> [[TMP3]], ptr [[BASE]], <vscale x 8 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f16m2x2(_Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[V0]], <vscale x 1 x float> [[V1]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[TMP2]], <vscale x 1 x float> [[TMP3]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f32mf2x2(float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[V0]], <vscale x 2 x float> [[V1]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[TMP2]], <vscale x 2 x float> [[TMP3]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f32m1x2(float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x float> [[V0:%.*]], <vscale x 4 x float> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[V0]], <vscale x 4 x float> [[V1]], ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } poison, <vscale x 4 x float> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], <vscale x 4 x float> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f32m2x2(float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x float> [[V0:%.*]], <vscale x 8 x float> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x float> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[V0]], <vscale x 8 x float> [[V1]], ptr [[BASE]], <vscale x 8 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } poison, <vscale x 8 x float> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], <vscale x 8 x float> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[TMP2]], <vscale x 8 x float> [[TMP3]], ptr [[BASE]], <vscale x 8 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f32m4x2(float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x double> [[V0:%.*]], <vscale x 1 x double> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[V0]], <vscale x 1 x double> [[V1]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[TMP2]], <vscale x 1 x double> [[TMP3]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f64m1x2(double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x double> [[V0:%.*]], <vscale x 2 x double> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x double> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x double> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[V0]], <vscale x 2 x double> [[V1]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double> } poison, <vscale x 2 x double> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], <vscale x 2 x double> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[TMP2]], <vscale x 2 x double> [[TMP3]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f64m2x2(double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x double> [[V0:%.*]], <vscale x 4 x double> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x double> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x double> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[V0]], <vscale x 4 x double> [[V1]], ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x double>, <vscale x 4 x double> } poison, <vscale x 4 x double> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], <vscale x 4 x double> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[TMP2]], <vscale x 4 x double> [[TMP3]], ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_f64m4x2(double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf8x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[V0]], <vscale x 1 x i8> [[V1]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8> [[TMP3]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i8mf8x2(int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[V0]], <vscale x 2 x i8> [[V1]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8> [[TMP3]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i8mf4x2(int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[V0]], <vscale x 4 x i8> [[V1]], ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8> [[TMP3]], ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i8mf2x2(int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[V0]], <vscale x 8 x i8> [[V1]], ptr [[BASE]], <vscale x 8 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8> [[TMP3]], ptr [[BASE]], <vscale x 8 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i8m1x2(int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[V0]], <vscale x 1 x i16> [[V1]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16> [[TMP3]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i16mf4x2(int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[V0]], <vscale x 2 x i16> [[V1]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16> [[TMP3]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i16mf2x2(int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[V0]], <vscale x 4 x i16> [[V1]], ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16> [[TMP3]], ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i16m1x2(int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i16> [[V0:%.*]], <vscale x 8 x i16> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[V0]], <vscale x 8 x i16> [[V1]], ptr [[BASE]], <vscale x 8 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], ptr [[BASE]], <vscale x 8 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i16m2x2(int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[V0]], <vscale x 1 x i32> [[V1]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32> [[TMP3]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i32mf2x2(int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[V0]], <vscale x 2 x i32> [[V1]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32> [[TMP3]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i32m1x2(int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[V0]], <vscale x 4 x i32> [[V1]], ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i32m2x2(int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[V0:%.*]], <vscale x 8 x i32> [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i32> [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[V0]], <vscale x 8 x i32> [[V1]], ptr [[BASE]], <vscale x 8 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } poison, <vscale x 8 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], <vscale x 8 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32> [[TMP3]], ptr [[BASE]], <vscale x 8 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i32m4x2(int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i64m1x2(int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i64m2x2(int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i64m4x2(int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u8mf8x2(uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u8mf4x2(uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg2ei64_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u8mf2x2(uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u8m1x2(uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u16mf4x2(uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u16mf2x2(uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u16m1x2(uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg2ei64_v_u16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u16m2x2(uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u32mf2x2(uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u32m1x2(uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u32m2x2(uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u32m4x2(uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u64m1x2(uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u64m2x2(uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(base, bindex, v0, v1, vl); +void 
test_vsuxseg2ei64_v_u64m4x2(uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f32mf2x2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f32m1x2_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f32m2x2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local 
void @test_vsuxseg2ei64_v_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f32m4x2_m(vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f64m1x2_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f64m2x2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_f64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f64m4x2_m(vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf8x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, 
vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i8m1x2_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x2_t v_tuple, size_t vl) { + return 
__riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: 
ret void // -void test_vsuxseg2ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_i64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf8x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } 
[[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u8m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void 
test_vsuxseg2ei64_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_u64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[V0]], [[V1]], 
ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei64(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei64(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg2ei8.c @@ -7,963 +7,1347 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f16mf4x2(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f16mf2x2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f16m1x2(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f16m2x2(_Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg2ei8_v_f16m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f16m4x2(_Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f32mf2x2(float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsuxseg2ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f32m1x2(float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f32m2x2(float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f32m4x2(float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f64m1x2(double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f64m2x2(double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f64m4x2(double *base, vuint8mf2_t bindex, vfloat64m4x2_t 
v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf8x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i8mf8x2(int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf4x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i8mf4x2(int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i8mf2x2(int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m1x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i8m1x2(int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m2x2 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i8m2x2(int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg2ei8_v_i8m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i8m4x2(int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i16mf4x2(int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i16mf2x2(int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i16m1x2(int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i16m2x2(int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i16m4x2(int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i32mf2x2(int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i32m1x2(int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i32m2x2(int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i32m4x2(int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i64m1x2(int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i64m2x2(int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i64m4x2(int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf8x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i8.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u8mf8x2(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i8.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u8mf4x2(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i8.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u8mf2x2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i8.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u8m1x2(uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i8.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u8m2x2(uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv32i8.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u8m4x2(uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u16mf4x2(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u16mf2x2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u16m1x2(uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u16m2x2(uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16i16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u16m4x2(uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32mf2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u32mf2x2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u32m1x2(uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u32m2x2(uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8i32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u32m4x2(uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m1x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1i64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u64m1x2(uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m2x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2i64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u64m2x2(uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m4x2
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4i64.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u64m4x2(uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f16mf4x2_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f16mf2x2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f16m1x2_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f16m2x2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f16m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f16m4x2_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f32mf2x2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f32m1x2_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f32m2x2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f32m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f32m4x2_m(vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f64m1x2_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f64m2x2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_f64m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f64m4x2_m(vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf8x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i8mf8x2_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i8mf4x2_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i8mf2x2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m1x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i8m1x2_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i8m2x2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i8m4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i8m4x2_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf4x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
- return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i16mf4x2_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x2_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16mf2x2_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:
[[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i16mf2x2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i16m1x2_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - 
return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i16m2x2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i16m4x2_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i32mf2x2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i32m1x2_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i32m2x2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i32m4x2_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i64m1x2_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i64m2x2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg2ei8_v_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_i64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i64m4x2_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf8x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u8mf8x2_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u8mf4x2_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u8mf2x2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return 
__riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u8m1x2_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u8m2x2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u8m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u8m4x2_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u16mf4x2_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u16mf2x2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u16m1x2_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u16m2x2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u16m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u16m4x2_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local 
void @test_vsuxseg2ei8_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32mf2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u32mf2x2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u32m1x2_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u32m2x2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u32m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u32m4x2_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m1x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, 
vuint64m1_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u64m1x2_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m2x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u64m2x2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_u64m4x2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[V0]], [[V1]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , } [[TMP1]], 1 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[TMP2]], [[TMP3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return __riscv_vsuxseg2ei8(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u64m4x2_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4x2_t v_tuple, size_t vl) { + return __riscv_vsuxseg2ei8(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei16.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei16.c @@ -7,743 +7,1187 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f16mf4x3(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f16mf2x3(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f16m1x3(_Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f16m2x3(_Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f32mf2x3(float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f32m1x3(float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f32m2x3(float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f64m1x3(double *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] 
= extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f64m2x3(double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i8mf8x3(int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, 
size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i8mf4x3(int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i8mf2x3(int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i8m1x3(int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i8m2x3(int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i16mf4x3(int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i16mf2x3(int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i16m1x3(int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i16m2x3(int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i32mf2x3(int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i32m1x3(int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i32m2x3(int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, 
size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i64m1x3(int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i64m2x3(int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u8mf8x3(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf4 -// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u8mf4x3(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u8mf2x3(uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u8m1x3(uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u8m2x3(uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u16mf4x3(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u16mf2x3(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , 
, } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u16m1x3(uint16_t *base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u16m2x3(uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u32mf2(uint32_t *base, 
vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u32mf2x3(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u32m1x3(uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u32m2x3(uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); 
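+  // Caller-side note: the tuple overload above takes a single vuint32m2x3_t
+  // in place of the former v0/v1/v2 operands. A minimal assembly sketch,
+  // assuming the v0.12 tuple vundefined/vset intrinsic forms are available
+  // (v0, v1, v2 are hypothetical vuint32m2_t values):
+  //   vuint32m2x3_t t = __riscv_vundefined_u32m2x3();
+  //   t = __riscv_vset_v_u32m2_u32m2x3(t, 0, v0);
+  //   t = __riscv_vset_v_u32m2_u32m2x3(t, 1, v1);
+  //   t = __riscv_vset_v_u32m2_u32m2x3(t, 2, v2);
+  //   __riscv_vsuxseg3ei16(base, bindex, t, vl);
+  // The masked tests that follow keep the same convention, with the vbool
+  // mask prepended to the argument list.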
} -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u64m1x3(uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u64m2x3(uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf4x3_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f32mf2x3_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f32m1x3_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f32m2x3_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f64m1x3_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_f64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i16.i64( 
[[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_f64m2x3_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i8m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } 
[[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_i64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg3ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, 
vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u8m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, 
v1, v2, vl); +void test_vsuxseg3ei16_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u16m1x3_m(vbool16_t mask, uint16_t 
*base, vuint16m1_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { + return 
__riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg3ei16_v_u64m1_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m1x3_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64> [[TMP5]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m2_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], <vscale x 2 x i64> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei16_v_u64m2x3_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[V0]], <vscale x 2 x i64> [[V1]], <vscale x 2 x i64> [[V2]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], <vscale x 2 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64> [[TMP5]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei16(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei16(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei32.c
@@ -7,743 +7,1187 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[TMP3]], <vscale x 1 x half> [[TMP4]], <vscale x 1 x half> [[TMP5]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei32_v_f16mf4x3(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0]], <vscale x 2 x half> [[V1]], <vscale x 2 x half> [[V2]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[TMP3]], <vscale x 2 x half> [[TMP4]], <vscale x 2 x half> [[TMP5]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei32_v_f16mf2x3(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) {
+  return
__riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f16m1x3(_Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f16m2x3(_Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg3ei32_v_f32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f32mf2x3(float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f32m1x3(float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i32.i64( 
[[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f32m2x3(float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f64m1x3(double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f64m2x3(double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8mf8x3(int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32.i64( [[TMP3]], [[TMP4]], 
[[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8mf4x3(int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8mf2x3(int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8m1x3(int8_t *base, vuint32m4_t bindex, 
vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8m2x3(int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i16mf4x3(int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg3ei32_v_i16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i16mf2x3(int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i16m1x3(int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i16m2x3(int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i32mf2x3(int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i32m1x3(int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i32m2x3(int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.nxv1i64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i64m1x3(int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i64m2x3(int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, 
vl); +void test_vsuxseg3ei32_v_u8mf8x3(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u8mf4x3(uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u8mf2x3(uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u8m1x3(uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u8m2x3(uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u16mf4x3(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u16mf2x3(uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u16m1x3(uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u16m2x3(uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } 
[[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u32mf2x3(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u32m1x3(uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, 
size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u32m2x3(uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u64m1x3(uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u64m2x3(uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg3ei32_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m1_m -// CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f32mf2x3_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f32m1x3_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg3ei32_v_f32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f32m2x3_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f64m1x3_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_f64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_f64m2x3_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i8m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_i64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] 
= extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u8m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg3ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, 
vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return 
__riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_u64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei32(mask, base, bindex, v0, v1, v2, vl); +void 
test_vsuxseg3ei32_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei32(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei64.c @@ -7,703 +7,1123 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f16mf4x3(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg3ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f16mf2x3(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f16m1x3(_Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f16m2x3(_Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + 
return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f32mf2x3(float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f32m1x3(float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg3ei64_v_f32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f32m2x3(float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f64m1x3(double *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i64.i64( [[V0]], 
[[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f64m2x3(double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i8mf8x3(int8_t *base, vuint64m1_t bindex, vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i8mf4x3(int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i8mf2x3(int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i8m1x3(int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i16mf4x3(int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i16mf2x3(int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, 
size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i16m1x3(int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i16m2x3(int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg3ei64_v_i32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i32mf2x3(int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i32m1x3(int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i64.i64( [[V0]], 
[[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i32m2x3(int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i64m1x3(int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i64m2x3(int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf8x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u8mf8x3(uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u8mf4x3(uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u8mf2x3(uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u8m1x3(uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t 
v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf4x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u16mf4x3(uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u16mf2x3(uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u16m1x3(uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u16m2x3(uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32mf2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u32mf2x3(uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u32m1x3(uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u32m2x3(uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m1x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u64m1x3(uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m2x3 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.nxv2i64.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u64m2x3(uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f16m2_m(vbool8_t 
mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f32mf2x3_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, 
vfloat32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f32m1x3_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f32m2x3_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, 
vl); +void test_vsuxseg3ei64_v_f64m1x3_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_f64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f64m2x3_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, 
vint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], 
ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg3ei64_v_i32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_i64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf8x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u8m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf4x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], 
i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u16m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32mf2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u32m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } 
[[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m1x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x3_t v_tuple, size_t vl) { + return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_u64m2x3_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } 
[[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64> [[TMP5]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei64(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei64_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei64(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei8.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei8.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg3ei8.c
@@ -7,743 +7,1187 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { ,
, } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[TMP3]], <vscale x 2 x half> [[TMP4]], <vscale x 2 x half> [[TMP5]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f16mf2x3(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0]], <vscale x 4 x half> [[V1]], <vscale x 4 x half> [[V2]], ptr [[BASE]], <vscale x 4 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], <vscale x 4 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[TMP3]], <vscale x 4 x half> [[TMP4]], <vscale x 4 x half> [[TMP5]], ptr [[BASE]], <vscale x 4 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f16m1x3(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 8 x half> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[V0]], <vscale x 8 x half> [[V1]], <vscale x 8 x half> [[V2]], ptr [[BASE]], <vscale x 8 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } poison, <vscale x 8 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], <vscale x 8 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], <vscale x 8 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[TMP3]], <vscale x 8 x half> [[TMP4]], <vscale x 8 x half> [[TMP5]], ptr [[BASE]], <vscale x 8 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f16m2x3(_Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[V0]], <vscale x 1 x float> [[V1]], <vscale x 1 x float> [[V2]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[TMP3]], <vscale x 1 x float> [[TMP4]], <vscale x 1 x float> [[TMP5]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f32mf2x3(float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[V0]], <vscale x 2 x float> [[V1]], <vscale x 2 x float> [[V2]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], <vscale x 2 x float> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[TMP3]], <vscale x 2 x float> [[TMP4]], <vscale x 2 x float> [[TMP5]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f32m1x3(float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x float> [[V0:%.*]], <vscale x 4 x float> [[V1:%.*]], <vscale x 4 x float> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x float> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[V0]], <vscale x 4 x float> [[V1]], <vscale x 4 x float> [[V2]], ptr [[BASE]], <vscale x 4 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } poison, <vscale x 4 x float> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], <vscale x 4 x float> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP1]], <vscale x 4 x float> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[TMP3]], <vscale x 4 x float> [[TMP4]], <vscale x 4 x float> [[TMP5]], ptr [[BASE]], <vscale x 4 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f32m2x3(float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x double> [[V0:%.*]], <vscale x 1 x double> [[V1:%.*]], <vscale x 1 x double> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x double> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[V0]], <vscale x 1 x double> [[V1]], <vscale x 1 x double> [[V2]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } poison, <vscale x 1 x double> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], <vscale x 1 x double> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP1]], <vscale x 1 x double> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[TMP3]], <vscale x 1 x double> [[TMP4]], <vscale x 1 x double> [[TMP5]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f64m1x3(double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x double> [[V0:%.*]], <vscale x 2 x double> [[V1:%.*]], <vscale x 2 x double> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x double> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x double> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x double> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[V0]], <vscale x 2 x double> [[V1]], <vscale x 2 x double> [[V2]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } poison, <vscale x 2 x double> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], <vscale x 2 x double> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP1]], <vscale x 2 x double> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[TMP3]], <vscale x 2 x double> [[TMP4]], <vscale x 2 x double> [[TMP5]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f64m2x3(double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf8x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[V0]], <vscale x 1 x i8> [[V1]], <vscale x 1 x i8> [[V2]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i8mf8x3(int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[V0]], <vscale x 2 x i8> [[V1]], <vscale x 2 x i8> [[V2]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i8mf4x3(int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[V0]], <vscale x 4 x i8> [[V1]], <vscale x 4 x i8> [[V2]], ptr [[BASE]], <vscale x 4 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], ptr [[BASE]], <vscale x 4 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i8mf2x3(int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[V0]], <vscale x 8 x i8> [[V1]], <vscale x 8 x i8> [[V2]], ptr [[BASE]], <vscale x 8 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], ptr [[BASE]], <vscale x 8 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i8m1x3(int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[V0]], <vscale x 16 x i8> [[V1]], <vscale x 16 x i8> [[V2]], ptr [[BASE]], <vscale x 16 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], ptr [[BASE]], <vscale x 16 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i8m2x3(int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[V0]], <vscale x 1 x i16> [[V1]], <vscale x 1 x i16> [[V2]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16> [[TMP5]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i16mf4x3(int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[V0]], <vscale x 2 x i16> [[V1]], <vscale x 2 x i16> [[V2]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16> [[TMP5]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i16mf2x3(int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[V0]], <vscale x 4 x i16> [[V1]], <vscale x 4 x i16> [[V2]], ptr [[BASE]], <vscale x 4 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16> [[TMP5]], ptr [[BASE]], <vscale x 4 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i16m1x3(int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i16> [[V0:%.*]], <vscale x 8 x i16> [[V1:%.*]], <vscale x 8 x i16> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[V_TUPLE_COERCE1:%.*]], <vscale x 8 x i16> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[V0]], <vscale x 8 x i16> [[V1]], <vscale x 8 x i16> [[V2]], ptr [[BASE]], <vscale x 8 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], <vscale x 8 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16> [[TMP5]], ptr [[BASE]], <vscale x 8 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i16m2x3(int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[V0]], <vscale x 1 x i32> [[V1]], <vscale x 1 x i32> [[V2]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], <vscale x 1 x i32> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32> [[TMP5]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i32mf2x3(int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[V0]], <vscale x 2 x i32> [[V1]], <vscale x 2 x i32> [[V2]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], <vscale x 2 x i32> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32> [[TMP5]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i32m1x3(int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], <vscale x 4 x i32> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[V0]], <vscale x 4 x i32> [[V1]], <vscale x 4 x i32> [[V2]], ptr [[BASE]], <vscale x 4 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], <vscale x 4 x i32> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32> [[TMP5]], ptr [[BASE]], <vscale x 4 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i32m2x3(int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64> [[TMP5]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i64m1x3(int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], <vscale x 2 x i64> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[V0]], <vscale x 2 x i64> [[V1]], <vscale x 2 x i64> [[V2]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], <vscale x 2 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64> [[TMP5]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i64m2x3(int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf8x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[V0]], <vscale x 1 x i8> [[V1]], <vscale x 1 x i8> [[V2]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u8mf8x3(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[V0]], <vscale x 2 x i8> [[V1]], <vscale x 2 x i8> [[V2]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u8mf4x3(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[V0]], <vscale x 4 x i8> [[V1]], <vscale x 4 x i8> [[V2]], ptr [[BASE]], <vscale x 4 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], ptr [[BASE]], <vscale x 4 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u8mf2x3(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[V0]], <vscale x 8 x i8> [[V1]], <vscale x 8 x i8> [[V2]], ptr [[BASE]], <vscale x 8 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], ptr [[BASE]], <vscale x 8 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u8m1x3(uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 16 x i8> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[V0]], <vscale x 16 x i8> [[V1]], <vscale x 16 x i8> [[V2]], ptr [[BASE]], <vscale x 16 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } poison, <vscale x 16 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], <vscale x 16 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP1]], <vscale x 16 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8> [[TMP5]], ptr [[BASE]], <vscale x 16 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u8m2x3(uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf4x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[V0]], <vscale x 1 x i16> [[V1]], <vscale x 1 x i16> [[V2]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16> [[TMP5]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u16mf4x3(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[V0]], <vscale x 2 x i16> [[V1]], <vscale x 2 x i16> [[V2]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16> [[TMP5]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u16mf2x3(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[V0]], <vscale x 4 x i16> [[V1]], <vscale x 4 x i16> [[V2]], ptr [[BASE]], <vscale x 4 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16> [[TMP5]], ptr [[BASE]], <vscale x 4 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u16m1x3(uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i16> [[V0:%.*]], <vscale x 8 x i16> [[V1:%.*]], <vscale x 8 x i16> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i16> [[V_TUPLE_COERCE1:%.*]], <vscale x 8 x i16> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[V0]], <vscale x 8 x i16> [[V1]], <vscale x 8 x i16> [[V2]], ptr [[BASE]], <vscale x 8 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } poison, <vscale x 8 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], <vscale x 8 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP1]], <vscale x 8 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16> [[TMP5]], ptr [[BASE]], <vscale x 8 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u16m2x3(uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32mf2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[V0]], <vscale x 1 x i32> [[V1]], <vscale x 1 x i32> [[V2]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], <vscale x 1 x i32> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32> [[TMP5]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u32mf2x3(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[V0]], <vscale x 2 x i32> [[V1]], <vscale x 2 x i32> [[V2]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], <vscale x 2 x i32> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32> [[TMP5]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u32m1x3(uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], <vscale x 4 x i32> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i32> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[V0]], <vscale x 4 x i32> [[V1]], <vscale x 4 x i32> [[V2]], ptr [[BASE]], <vscale x 4 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } poison, <vscale x 4 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], <vscale x 4 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP1]], <vscale x 4 x i32> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32> [[TMP5]], ptr [[BASE]], <vscale x 4 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u32m2x3(uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) {
  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m1x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64> [[TMP5]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u64m1x3(uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], <vscale x 2 x i64> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m2x3
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i64> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[V0]], <vscale x 2 x i64> [[V1]], <vscale x 2 x i64> [[V2]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } poison, <vscale x 2 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], <vscale x 2 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP1]], <vscale x 2 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64> [[TMP5]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u64m2x3(uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf4_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf4x3_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[TMP3]], <vscale x 1 x half> [[TMP4]], <vscale x 1 x half> [[TMP5]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f16mf4x3_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf2_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16mf2x3_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0]], <vscale x 2 x half> [[V1]], <vscale x 2 x half> [[V2]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[TMP3]], <vscale x 2 x half> [[TMP4]], <vscale x 2 x half> [[TMP5]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f16mf2x3_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m1_m
-// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m1x3_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x half> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0]], <vscale x 4 x half> [[V1]], <vscale x 4 x half> [[V2]], ptr [[BASE]], <vscale x 4 x i8> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } poison, <vscale x 4 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], <vscale x 4 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP1]], <vscale x 4 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[TMP3]], <vscale x 4 x half> [[TMP4]], <vscale x 4 x half> [[TMP5]], ptr [[BASE]], <vscale x 4 x i8> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f16m1x3_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m2_m
-// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f16m2x3_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 8 x half> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[V0]], <vscale x 8 x half> [[V1]], <vscale x 8 x half> [[V2]], ptr [[BASE]], <vscale x 8 x i8> [[BINDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } poison, <vscale x 8 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], <vscale x 8 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP1]], <vscale x 8 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[TMP3]], <vscale x 8 x half> [[TMP4]], <vscale x 8 x half> [[TMP5]], ptr [[BASE]], <vscale x 8 x i8> [[BINDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f16m2x3_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32mf2_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32mf2x3_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x float> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[V0]], <vscale x 1 x float> [[V1]], <vscale x 1 x float> [[V2]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } poison, <vscale x 1 x float> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], <vscale x 1 x float> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP1]], <vscale x 1 x float> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[TMP3]], <vscale x 1 x float> [[TMP4]], <vscale x 1 x float> [[TMP5]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f32mf2x3_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m1_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m1x3_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x float> [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[V0]], <vscale x 2 x float> [[V1]], <vscale x 2 x float> [[V2]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } poison, <vscale x 2 x float> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], <vscale x 2 x float> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP1]], <vscale x 2 x float> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP2]], 0
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP2]], 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP2]], 2
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[TMP3]], <vscale x 2 x float> [[TMP4]], <vscale x 2 x float> [[TMP5]], ptr [[BASE]], <vscale x 2 x i8> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f32m1x3_m(vbool32_t
+void test_vsuxseg3ei8_v_f32m1x3_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f32m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f32m2x3_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f64m1x3_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_f64m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f64m2x3_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf8x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i8mf8x3_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i8mf4x3_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i8mf2x3_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i8m1x3_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i8m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i8m2x3_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i16mf4x3_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i16mf2x3_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i16m1x3_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i16m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i16m2x3_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i32mf2x3_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i32m1x3_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i32m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i32m2x3_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i64m1x3_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_i64m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_i64m2x3_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf8x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u8mf8x3_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u8mf4x3_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u8mf2x3_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u8m1x3_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u8m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u8m2x3_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf4x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u16mf4x3_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u16mf2x3_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u16m1x3_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u16m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u16m2x3_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32mf2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u32mf2x3_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u32m1x3_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u32m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u32m2x3_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m1x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u64m1x3_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_u64m2x3_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP2]], 0
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP2]], 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP2]], 2
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[TMP3]], [[TMP4]], [[TMP5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg3ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
-  return __riscv_vsuxseg3ei8(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_u64m2x3_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x3_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg3ei8(mask, base, bindex, v_tuple, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei16.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei16.c
@@ -7,743 +7,1335 @@
#include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_f16mf4x4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_f16mf2x4(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_f16m1x4(_Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_f16m2x4(_Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg4ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_f32mf2x4(float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void
@llvm.riscv.vsuxseg4.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f32m1x4(float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f32m2x4(float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f64m1x4(double *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f64m2(double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f64m2x4(double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i8mf8x4(int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i8mf4x4(int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, 
vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[V0]], <vscale x 4 x i8> [[V1]], <vscale x 4 x i8> [[V2]], <vscale x 4 x i8> [[V3]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_i8mf2x4(int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[V0]], <vscale x 8 x i8> [[V1]], <vscale x 8 x i8> [[V2]], <vscale x 8 x i8> [[V3]], ptr [[BASE]], <vscale x 8 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], ptr [[BASE]], <vscale x 8 x i16> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_i8m1x4(int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i8m2x4(int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16mf4(int16_t 
*base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i16mf4x4(int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i16mf2x4(int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.nxv4i16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i16m1x4(int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i16m2x4(int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i32mf2x4(int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i32m1x4(int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , 
, , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i32m2x4(int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i64m1x4(int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } 
[[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i64m2x4(int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u8mf8x4(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr 
[[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u8mf4x4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u8mf2x4(uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u8m1x4(uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u8m2x4(uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg4ei16_v_u16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u16mf4x4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u16mf2x4(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg4ei16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u16m1x4(uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16m2(uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void 
test_vsuxseg4ei16_v_u16m2x4(uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u32mf2x4(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg4ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u32m1x4(uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u32m2x4(uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u64m1x4(uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u64m2x4(uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue 
{ , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg4ei16_v_f32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f32mf2x4_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void 
test_vsuxseg4ei16_v_f32m1x4_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f32m2x4_m(vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f64m1x4_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_f64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_f64m2x4_m(vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, 
bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } 
[[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } 
[[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_i64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t 
vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m2_m -// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, 
vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei16_v_u64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei16(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei16(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei32.c @@ -7,743 +7,1335 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f16mf4x4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void
@test_vsuxseg4ei32_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f16mf2x4(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void 
test_vsuxseg4ei32_v_f16m1x4(_Float16 *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f16m2x4(_Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg4ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f32mf2x4(float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f32m1x4(float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 
3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f32m2x4(float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f64m1x4(double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f64m2x4(double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i8mf8x4(int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i8mf4x4(int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i8mf2x4(int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i8m1x4(int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i8m2x4(int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], 
[[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i16mf4x4(int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i16mf2x4(int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i16m1x4(int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i16m2x4(int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i32mf2x4(int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i32m1x4(int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg4ei32_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i32m2x4(int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void 
test_vsuxseg4ei32_v_i64m1x4(int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i64m2x4(int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8mf8(uint8_t 
*base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u8mf8x4(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u8mf4x4(uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.nxv4i8.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u8mf2x4(uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u8m1x4(uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u8m2x4(uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u16mf4x4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue 
{ , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u16mf2x4(uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u16m1x4(uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u16m2x4(uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u32mf2x4(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32.i64( 
[[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u32m1x4(uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u32m2x4(uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u64m1x4(uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u64m2x4(uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, 
base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , 
, , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f32mf2x4_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f32m1x4_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f32m2x4_m(vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f64m1x4_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_f64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_f64m2x4_m(vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf8_m -// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, 
vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , 
, } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32mf2_m 
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, 
vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_i64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg4ei32_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u16mf4_m(vbool64_t 
mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u32m2x4_m +// CHECK-RV64-SAME: 
( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x4_t v_tuple, 
size_t vl) {
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_u64m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
- return __riscv_vsuxseg4ei32(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg4ei32(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei64.c
@@ -7,703 +7,1263 @@
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:
[[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f16mf4x4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f16mf2x4(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f16m1x4(_Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f16m2x4(_Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], 
ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f32mf2x4(float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f32m1x4(float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f32m2x4(float *base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f64m1x4(double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg4ei64_v_f64m2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x double> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x double> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x double> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x double> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[V0]], <vscale x 2 x double> [[V1]], <vscale x 2 x double> [[V2]], <vscale x 2 x double> [[V3]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } poison, <vscale x 2 x double> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], <vscale x 2 x double> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP1]], <vscale x 2 x double> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP2]], <vscale x 2 x double> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[TMP4]], <vscale x 2 x double> [[TMP5]], <vscale x 2 x double> [[TMP6]], <vscale x 2 x double> [[TMP7]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_f64m2x4(double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf8x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[V0]], <vscale x 1 x i8> [[V1]], <vscale x 1 x i8> [[V2]], <vscale x 1 x i8> [[V3]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8> [[TMP7]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_i8mf8x4(int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x i8> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[V0]], <vscale x 2 x i8> [[V1]], <vscale x 2 x i8> [[V2]], <vscale x 2 x i8> [[V3]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } poison, <vscale x 2 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], <vscale x 2 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP1]], <vscale x 2 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP2]], <vscale x 2 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8> [[TMP7]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_i8mf4x4(int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl);
 }
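The pattern repeats for every element type and LMUL below: the builtin now takes one tuple argument, and codegen reassembles the discrete fields with insertvalue/extractvalue before calling the unchanged @llvm.riscv.vsuxseg4 intrinsic. As a minimal usage sketch (not part of the patch; it assumes the companion tuple-returning segment load __riscv_vlseg4e8_v_i8mf8x4 from the same intrinsics revision, and the wrapper name is illustrative):

```c
#include <riscv_vector.h>

// Load a 4-field segment tuple, then scatter it through 64-bit
// indices using the unordered indexed segment store tested above.
void roundtrip_i8mf8x4(int8_t *dst, const int8_t *src,
                       vuint64m1_t bindex, size_t vl) {
  vint8mf8x4_t v_tuple = __riscv_vlseg4e8_v_i8mf8x4(src, vl);
  __riscv_vsuxseg4ei64(dst, bindex, v_tuple, vl);  // overloaded form
}
```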
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 4 x i8> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[V0]], <vscale x 4 x i8> [[V1]], <vscale x 4 x i8> [[V2]], <vscale x 4 x i8> [[V3]], ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } poison, <vscale x 4 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], <vscale x 4 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP1]], <vscale x 4 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP2]], <vscale x 4 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8> [[TMP7]], ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_i8mf2x4(int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 8 x i8> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[V0]], <vscale x 8 x i8> [[V1]], <vscale x 8 x i8> [[V2]], <vscale x 8 x i8> [[V3]], ptr [[BASE]], <vscale x 8 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } poison, <vscale x 8 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], <vscale x 8 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP1]], <vscale x 8 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP2]], <vscale x 8 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8> [[TMP7]], ptr [[BASE]], <vscale x 8 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_i8m1x4(int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf4x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[V0]], <vscale x 1 x i16> [[V1]], <vscale x 1 x i16> [[V2]], <vscale x 1 x i16> [[V3]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], <vscale x 1 x i16> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16> [[TMP7]], ptr [[BASE]], <vscale x 1 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_i16mf4x4(int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl);
 }
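The masked tests later in this file follow the same shape with a leading vbool operand that selects which segments are written. A sketch of driving that masked overload directly (the wrapper name is illustrative; the signature is the one exercised by the _m tests below, e.g. test_vsuxseg4ei64_v_i8mf8x4_m):

```c
#include <riscv_vector.h>

// Only segments whose mask bit is set are stored; index elements
// for inactive lanes are not dereferenced.
void masked_scatter_i8mf8x4(int8_t *dst, vbool64_t mask,
                            vuint64m1_t bindex, vint8mf8x4_t v_tuple,
                            size_t vl) {
  __riscv_vsuxseg4ei64(mask, dst, bindex, v_tuple, vl);
}
```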
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], <vscale x 2 x i16> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf2x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[V0]], <vscale x 2 x i16> [[V1]], <vscale x 2 x i16> [[V2]], <vscale x 2 x i16> [[V3]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], <vscale x 2 x i16> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16> [[TMP7]], ptr [[BASE]], <vscale x 2 x i64> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_i16mf2x4(int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m1x4
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE2:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[V0]], <vscale x 4 x i16> [[V1]], <vscale x 4 x i16> [[V2]], <vscale x 4 x i16> [[V3]], ptr [[BASE]], <vscale x 4 x i64> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], <vscale x 4 x i16> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], 0
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], 3
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16> [[TMP7]],
ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i16m1x4(int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i16m2x4(int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i32mf2x4(int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i32m1x4(int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i32m2x4(int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i64m1x4(int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue 
{ , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i64m2x4(int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u8mf8x4(uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , 
} poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u8mf4x4(uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u8mf2x4(uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u8m1x4(uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u16mf4x4(uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u16mf2x4(uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u16m1x4(uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u16m2x4(uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u32mf2x4(uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { + return 
__riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u32m1x4(uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t 
vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u32m2x4(uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u64m1x4(uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u64m2x4(uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } 
[[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f32mf2x4_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg4ei64_v_f32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f32m1x4_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f32m2x4_m(vbool16_t mask, float 
*base, vuint64m4_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f64m1x4_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_f64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i64.i64( 
[[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f64m2x4_m(vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , 
} [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], 
[[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg4ei64_v_i16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i16m1x4_m(vbool16_t mask, 
int16_t *base, vuint64m4_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( 
[[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , 
, , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_i64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[V0]], 
[[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void 
test_vsuxseg4ei64_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, 
vuint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_u64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei64(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei64(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg4ei8.c @@ -7,743 +7,1335 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +//
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f16mf4x4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f16mf2x4(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], 
i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f16m1x4(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f16m2x4(_Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f32mf2x4(float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f32m1x4(float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m2x4 +// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f32m2x4(float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f64m1x4(double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f64m2x4(double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i8mf8x4(int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { + return 
__riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i8mf4x4(int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return 
__riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i8mf2x4(int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i8m1x4(int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg4ei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i8m2x4(int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i16mf4x4(int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.nxv2i16.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i16mf2x4(int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i16m1x4(int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i16m2x4(int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i32mf2x4(int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], 
[[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i32m1x4(int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i32m2x4(int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i64m1x4(int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i64m2x4(int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf8x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i8.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u8mf8x4(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i8.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u8mf4x4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i8.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u8mf2x4(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i8.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u8m1x4(uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv16i8.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u8m2x4(uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf4x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i16.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u16mf4x4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i16.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u16mf2x4(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u16m1x4(uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8i16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u16m2x4(uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32mf2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, 
vl); +void test_vsuxseg4ei8_v_u32mf2x4(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u32m1x4(uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4i32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg4ei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u32m2x4(uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m1x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1i64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u64m1x4(uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m2x4 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg4.nxv2i64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u64m2x4(uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f16mf4x4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , 
, , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f16mf2x4_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f16m1x4_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f16m2x4_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f32mf2x4_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f32m1x4_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, 
bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f32m2x4_m(vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f64m1x4_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_f64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f64m2x4_m(vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i8mf8x4_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 
2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i8mf4x4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i8mf2x4_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i8m1x4_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i8m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i8m2x4_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i16mf4x4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void 
test_vsuxseg4ei8_v_i16mf2x4_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i16m1x4_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i16m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i16m2x4_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32mf2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i32mf2x4_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i32m1x4_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i32m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i32m2x4_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m1x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i64m1x4_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_i64m2x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i64m2x4_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf8x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u8mf8x4_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x4_t v_tuple, size_t vl) { + return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf4x4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl); +void 
test_vsuxseg4ei8_v_u8mf4x4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u8mf2x4_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u8m1x4_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u8m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u8m2x4_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf4x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u16mf4x4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u16mf2x4_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u16m1x4_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u16m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u16m2x4_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32mf2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u32mf2x4_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u32m1x4_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u32m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u32m2x4_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m1x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u64m1x4_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_u64m2x4_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP3]], 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , } [[TMP3]], 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[TMP3]], 3
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[TMP4]], [[TMP5]], [[TMP6]], [[TMP7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
-  return __riscv_vsuxseg4ei8(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_u64m2x4_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2x4_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg4ei8(mask, base, bindex, v_tuple, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei16.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei16.c
@@ -7,523 +7,1043 @@
 
 #include 
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_f16mf4x5(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_f16mf2x5(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_f16m1x5(_Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_f32mf2x5(float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_f32m1x5(float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f64m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_f64m1x5(double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf8x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_i8mf8x5(int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_i8mf4x5(int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_i8mf2x5(int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_i8m1x5(int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_i16mf4x5(int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_i16mf2x5(int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_i16m1x5(int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_i32mf2x5(int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_i32m1x5(int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i64m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_i64m1x5(int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) {
  + return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf8x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u8mf8x5(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u8mf4x5(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u8mf2x5(uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u8m1x5(uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u16mf4x5(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u16mf2x5(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
-  return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u16m1x5(uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+//
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u32mf2x5(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u32m1x5(uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u64m1x5(uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, 
vfloat16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , 
, } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_f32mf2x5_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg5ei16_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_f32m1x5_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_f64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } 
[[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_f64m1x5_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x5_t v_tuple, 
size_t vl) { + return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_i64m1x5_m 
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], 
[[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei16(mask, base, bindex, 
v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf4_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i16> [[V0:%.*]], <vscale x 1 x i16> [[V1:%.*]], <vscale x 1 x i16> [[V2:%.*]], <vscale x 1 x i16> [[V3:%.*]], <vscale x 1 x i16> [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf4x5_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i16> [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[V0]], <vscale x 1 x i16> [[V1]], <vscale x 1 x i16> [[V2]], <vscale x 1 x i16> [[V3]], <vscale x 1 x i16> [[V4]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } poison, <vscale x 1 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], <vscale x 1 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP1]], <vscale x 1 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP2]], <vscale x 1 x i16> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP3]], <vscale x 1 x i16> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16> [[TMP9]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf2_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i16> [[V0:%.*]], <vscale x 2 x i16> [[V1:%.*]], <vscale x 2 x i16> [[V2:%.*]], <vscale x 2 x i16> [[V3:%.*]], <vscale x 2 x i16> [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16mf2x5_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE3:%.*]], <vscale x 2 x i16> [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[V0]], <vscale x 2 x i16> [[V1]], <vscale x 2 x i16> [[V2]], <vscale x 2 x i16> [[V3]], <vscale x 2 x i16> [[V4]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } poison, <vscale x 2 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], <vscale x 2 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP1]], <vscale x 2 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP2]], <vscale x 2 x i16> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP3]], <vscale x 2 x i16> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16> [[TMP9]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16m1_m
-// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u16m1x5_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE0:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE1:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE2:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE3:%.*]], <vscale x 4 x i16> [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[V0]], <vscale x 4 x i16> [[V1]], <vscale x 4 x i16> [[V2]], <vscale x 4 x i16> [[V3]], <vscale x 4 x i16> [[V4]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } poison, <vscale x 4 x i16> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], <vscale x 4 x i16> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP1]], <vscale x 4 x i16> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP2]], <vscale x 4 x i16> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP3]], <vscale x 4 x i16> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16> [[TMP9]], ptr [[BASE]], <vscale x 4 x i16> [[BINDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32mf2_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32mf2x5_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[V0]], <vscale x 1 x i32> [[V1]], <vscale x 1 x i32> [[V2]], <vscale x 1 x i32> [[V3]], <vscale x 1 x i32> [[V4]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], <vscale x 1 x i32> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], <vscale x 1 x i32> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], <vscale x 1 x i32> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32> [[TMP9]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32m1_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u32m1x5_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE3:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[V0]], <vscale x 2 x i32> [[V1]], <vscale x 2 x i32> [[V2]], <vscale x 2 x i32> [[V3]], <vscale x 2 x i32> [[V4]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], <vscale x 2 x i32> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], <vscale x 2 x i32> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], <vscale x 2 x i32> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32> [[TMP9]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u64m1_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei16_v_u64m1x5_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei16(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei16(mask, base, bindex, v_tuple, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei32.c
@@ -7,523 +7,1043 @@
#include <riscv_vector.h>

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf4
-// CHECK-RV64-SAME: (ptr
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_f16mf4x5(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: 
[[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_f16mf2x5(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_f16m1x5(_Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , 
, } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_f32mf2x5(float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_f32m1x5(float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg5ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_f64m1x5(double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// 
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i8mf8x5(int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i8mf4x5(int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i8mf2x5(int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i8m1x5(int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i16mf4x5(int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } 
[[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i16mf2x5(int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i16m1x5(int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] 
= insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i32mf2x5(int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i32m1x5(int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i64m1x5(int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.nxv1i8.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u8mf8x5(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u8mf4x5(uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u8mf2x5(uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u8m1x5(uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u16mf4x5(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.nxv2i16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u16mf2x5(uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u16m1x5(uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } 
[[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u32mf2x5(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u32m1x5(uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u64m1x5(uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , 
} [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, 
vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_f32mf2x5_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_f32m1x5_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_f64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_f64m1x5_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg5ei32_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void 
// -void test_vsuxseg5ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return 
__riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_i64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8mf2x5_m +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei32_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], 
[[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u16m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u32m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_u64m1x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei32(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei32_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei32(mask, base, bindex, v_tuple, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei64.c
@@ -7,523 +7,1043 @@
#include
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_f16mf4x5(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_f16mf2x5(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_f16m1x5(_Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_f32mf2x5(float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_f32m1x5(float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f64m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_f64m1x5(double *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf8x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_i8mf8x5(int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_i8mf4x5(int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_i8mf2x5(int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_i8m1x5(int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_i16mf4x5(int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_i16mf2x5(int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_i16m1x5(int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_i32mf2x5(int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_i32m1x5(int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i64m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_i64m1x5(int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf8x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_u8mf8x5(uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_u8mf4x5(uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_u8mf2x5(uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_u8m1x5(uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf4x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_u16mf4x5(uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_u16mf2x5(uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_u16m1x5(uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32mf2x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_u32mf2x5(uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_u32m1x5(uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u64m1x5
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_u64m1x5(uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf4x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16mf2x5_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg5ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei64_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]])
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: 
[[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_f32mf2x5_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_f32m1x5_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_f64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_f64m1x5_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, 
vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } 
[[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } 
[[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg5ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_i64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, 
base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[TMP5]], 
[[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei64_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_u64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei64(mask, base, bindex, v0, v1, v2, v3, v4, vl); 
+void test_vsuxseg5ei64_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei64(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg5ei8.c @@ -7,523 +7,1043 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f16mf4x5(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f16mf2x5(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f16m1x5(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32mf2 -// CHECK-RV64-SAME: 
(ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f32mf2x5(float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , 
, } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f32m1x5(float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f64m1x5(double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i8mf8x5(int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i8mf4x5(int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i8mf2x5(int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8.i64( [[TMP5]], 
[[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i8m1x5(int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i16mf4x5(int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } 
[[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i16mf2x5(int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i16m1x5(int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg5ei8_v_i32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i32mf2x5(int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i32m1x5(int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i64m1x5(int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf8x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { 
, , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i8.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u8mf8x5(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i8.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u8mf4x5(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u8mf2x5(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, 
vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u8m1x5(uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf4x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u16mf4x5(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u16mf2x5(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u16m1x5(uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32mf2x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u32mf2x5(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2i32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t 
v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u32m1x5(uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u64m1x5 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1i64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u64m1x5(uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f16mf4x5_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f16mf2x5_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f16m1x5_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } 
[[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f32mf2x5_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f32m1x5_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_f64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f64m1x5_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, 
vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i8mf8x5_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i8mf4x5_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i8mf2x5_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i8m1x5_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i16mf4x5_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i16mf2x5_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i16m1x5_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i32mf2x5_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i32m1_m(vbool32_t mask, int32_t *base, 
vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i32m1x5_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_i64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i64m1x5_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf8x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u8mf8x5_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u8mf4x5_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg5ei8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u8mf2x5_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u8m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u8m1x5_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf4x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u16mf4x5_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], 
[[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u16mf2x5_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u16m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u16m1x5_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32mf2x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u32mf2x5_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u32m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u32m1x5_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x5_t v_tuple, size_t vl) { + return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_u64m1x5_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP4]], 0 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , } [[TMP4]], 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , } [[TMP4]], 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , } [[TMP4]], 3 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , } [[TMP4]], 4 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[TMP5]], [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return __riscv_vsuxseg5ei8(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u64m1x5_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x5_t v_tuple, size_t vl) { + 
return __riscv_vsuxseg5ei8(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei16.c @@ -7,523 +7,1147 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_f16mf4x6(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void
@llvm.riscv.vsuxseg6.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_f16mf2x6(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_f16m1x6(_Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_f32mf2x6(float *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_f32m1x6(float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f64m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_f64m1x6(double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf8x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_i8mf8x6(int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf4x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_i8mf4x6(int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_i8mf2x6(int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_i8m1x6(int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf4x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_i16mf4x6(int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_i16mf2x6(int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_i16m1x6(int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_i32mf2x6(int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_i32m1x6(int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i64m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_i64m1x6(int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf8x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_u8mf8x6(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf4x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_u8mf4x6(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_u8mf2x6(uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_u8m1x6(uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf4x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_u16mf4x6(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_u16mf2x6(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_u16m1x6(uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_u32mf2x6(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_u32m1x6(uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u64m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_u64m1x6(uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f16m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_f32mf2x6_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f32m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , ,
} [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_f32m1x6_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_f64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_f64m1x6_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } 
[[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, 
v5, vl); +void test_vsuxseg6ei16_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, 
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[TMP6]], 
[[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_i64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg6ei16_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, 
vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: 
[[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } 
[[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei16_v_u64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei16(mask, base, bindex, 
v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei16(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei32.c @@ -7,523 +7,1147 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_f16mf4x6(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_f16mf2x6(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_f16m1x6(_Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_f32mf2x6(float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_f32m1x6(float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { 
, , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_f64m1x6(double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i8mf8x6(int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg6ei32_v_i8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i8mf4x6(int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i8mf2x6(int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i8m1x6(int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i16mf4x6(int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i16mf2x6(int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i16m1x6(int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i32mf2x6(int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i32m1x6(int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i64m1x6(int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u8mf8x6(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u8mf4x6(uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u8mf2x6(uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg6ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u8m1x6(uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } 
[[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u16mf4x6(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u16mf2x6(uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t 
vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u16m1x6(uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , 
, , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u32mf2x6(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void 
test_vsuxseg6ei32_v_u32m1x6(uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u64m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], <vscale x 1 x i64> [[V5]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei32(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_u64m1x6(uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei32(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf4_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf4x6_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], <vscale x 1 x half> [[V3]], <vscale x 1 x half> [[V4]], <vscale x 1 x half> [[V5]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], <vscale x 1 x half> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], <vscale x 1 x half> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[TMP6]], <vscale x 1 x half> [[TMP7]], <vscale x 1 x half> [[TMP8]], <vscale x 1 x half> [[TMP9]], <vscale x 1 x half> [[TMP10]], <vscale x 1 x half> [[TMP11]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei32_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf2_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16mf2x6_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE3:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE4:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0]], <vscale x 2 x half> [[V1]], <vscale x 2 x half> [[V2]], <vscale x 2 x half> [[V3]], <vscale x 2 x half> [[V4]], <vscale x 2 x half> [[V5]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], <vscale x 2 x half> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], <vscale x 2 x half> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]], <vscale x 2 x half> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP5]], 0
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP5]], 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP5]], 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP5]], 3
+// CHECK-RV64-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP5]], 4
+// CHECK-RV64-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP5]], 5
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[TMP6]], <vscale x 2 x half> [[TMP7]], <vscale x 2 x half> [[TMP8]], <vscale x 2 x half> [[TMP9]], <vscale x 2 x half> [[TMP10]], <vscale x 2 x half> [[TMP11]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
 //
CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], 
[[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_f32mf2x6_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_f32m1x6_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_f64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_f64m1x6_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf8_m -// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { 
, , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t 
v4, vint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32mf2x6_m +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] 
= extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_i64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x6_t v_tuple, size_t vl) { + return 
__riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf8x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsuxseg6ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u8m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf4x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = 
extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_u64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t 
vl) { - return __riscv_vsuxseg6ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei32_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei32(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei64.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei64.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei64.c @@ -7,523 +7,1147 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_f16mf4x6(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_f16mf2x6(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_f16m1x6(_Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_f32mf2x6(float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg6ei64_v_f32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_f32m1x6(float *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue 
{ , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_f64m1x6(double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i8mf8x6(int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i8mf4x6(int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } 
[[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i8mf2x6(int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i8m1x6(int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i16mf4x6(int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i16mf2x6(int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i16m1x6(int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32mf2 -// CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i32mf2x6(int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i32m1x6(int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_i64m1x6(int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg6ei64_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u8mf8x6(uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } 
[[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u8mf4x6(uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u8mf2x6(uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) { + return 
__riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u8m1x6(uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], 
[[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u16mf4x6(uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void 
test_vsuxseg6ei64_v_u16mf2x6(uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_u16m1x6(uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_u32mf2x6(uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_u32m1x6(uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u64m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_u64m1x6(uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f16m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_f32mf2x6_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f32m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_f32m1x6_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_f64m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_f64m1x6_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf8x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i8m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i16m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i32m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_i64m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf8x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
  + return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u8m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u16m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei64_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl);
 }
 
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u32m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]],
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_u64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, 
vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei64_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei64(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg6ei8.c @@ -7,523 +7,1147 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_f16mf4x6(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf2x6 +// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_f16mf2x6(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_f16m1x6(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_f32mf2x6(float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_f32m1x6(float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_f64m1x6(double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i8mf8x6(int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i8mf4x6(int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i8mf2x6(int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i8m1x6(int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i16mf4x6(int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i16mf2x6(int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i16m1x6(int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32mf2x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i32mf2x6(int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i32m1x6(int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i64m1x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i64m1x6(int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg6ei8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf8x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i8.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u8mf8x6(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf4x6 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], 
[[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i8.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_u8mf4x6(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i8.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_u8mf2x6(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv8i8.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_u8m1x6(uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf4x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_u16mf4x6(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_u16mf2x6(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4i16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_u16m1x6(uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32mf2x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_u32mf2x6(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2i32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_u32m1x6(uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u64m1x6
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1i64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_u64m1x6(uint64_t *base, vuint8mf8_t bindex, vuint64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_f16mf4x6_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_f16mf2x6_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f16m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_f16m1x6_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_f32mf2x6_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f32m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_f32m1x6_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_f64m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_f64m1x6_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf8x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_i8mf8x6_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_i8mf4x6_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_i8mf2x6_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i8m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_i8m1x6_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_i16mf4x6_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_i16mf2x6_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i16m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_i16m1x6_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_i32mf2x6_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i32m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_i32m1x6_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_i64m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_i64m1x6_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf8x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_u8mf8x6_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_u8mf4x6_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_u8mf2x6_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u8m1x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_u8m1x6_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf4x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
-  return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_u16mf4x6_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x6_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl);
 }
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16mf2x6_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4
+//
CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u16mf2x6_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u16m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u16m1x6_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg6ei8_v_u32mf2x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u32mf2x6_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u32m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } 
[[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u32m1x6_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_u64m1x6_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP5]], 0 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , } [[TMP5]], 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , } [[TMP5]], 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , } [[TMP5]], 3 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , } [[TMP5]], 4 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , } [[TMP5]], 5 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[TMP6]], [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return __riscv_vsuxseg6ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u64m1x6_m(vbool64_t mask, uint64_t *base, vuint8mf8_t 
bindex, vuint64m1x6_t v_tuple, size_t vl) { + return __riscv_vsuxseg6ei8(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei16.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei16.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei16.c @@ -7,523 +7,1251 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f16mf4x7(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg7ei16_v_f16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f16mf2x7(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], 
[[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f16m1x7(_Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f32mf2x7(float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f32m1x7(float *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], 
[[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f64m1x7(double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i8mf8x7(int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// 
CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i8mf4x7(int8_t *base, vuint16mf2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i8mf2x7(int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i8m1x7(int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i16mf4x7(int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i16mf2x7(int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i16m1x7(int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i32mf2x7(int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue 
{ , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i32m1x7(int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i64m1x7(int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u8mf8x7(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u8mf4x7(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u8mf2x7(uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { 
, , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u8m1x7(uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u16mf4x7(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u16mf2x7(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 
1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u16m1x7(uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// 
CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32> [[TMP9]], <vscale x 1 x i32> [[TMP10]], <vscale x 1 x i32> [[TMP11]], <vscale x 1 x i32> [[TMP12]], <vscale x 1 x i32> [[TMP13]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_u32mf2x7(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], <vscale x 2 x i32> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE3:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE4:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE5:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[V0]], <vscale x 2 x i32> [[V1]], <vscale x 2 x i32> [[V2]], <vscale x 2 x i32> [[V3]], <vscale x 2 x i32> [[V4]], <vscale x 2 x i32> [[V5]], <vscale x 2 x i32> [[V6]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } poison, <vscale x 2 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], <vscale x 2 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP1]], <vscale x 2 x i32> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP2]], <vscale x 2 x i32> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP3]], <vscale x 2 x i32> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP4]], <vscale x 2 x i32> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP5]], <vscale x 2 x i32> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32> [[TMP9]], <vscale x 2 x i32> [[TMP10]], <vscale x 2 x i32> [[TMP11]], <vscale x 2 x i32> [[TMP12]], <vscale x 2 x i32> [[TMP13]], ptr [[BASE]], <vscale x 2 x i16> [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_u32m1x7(uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u64m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x i64> [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[V0]], <vscale x 1 x i64> [[V1]], <vscale x 1 x i64> [[V2]], <vscale x 1 x i64> [[V3]], <vscale x 1 x i64> [[V4]], <vscale x 1 x i64> [[V5]], <vscale x 1 x i64> [[V6]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } poison, <vscale x 1 x i64> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], <vscale x 1 x i64> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP1]], <vscale x 1 x i64> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP2]], <vscale x 1 x i64> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP3]], <vscale x 1 x i64> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP4]], <vscale x 1 x i64> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP5]], <vscale x 1 x i64> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return __riscv_vsuxseg7ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_u64m1x7(uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg7ei16(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf4_m
-// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf4x7_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], <vscale x 1 x half> [[V3]], <vscale x 1 x half> [[V4]], <vscale x 1 x half> [[V5]], <vscale x 1 x half> [[V6]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half>
[[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t 
v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f32mf2x7_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f32m1x7_m +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f32m1x7_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_f64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_f64m1x7_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, 
vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr 
[[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_i64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return 
__riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], 
[[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , 
} [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], 
[[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei16_v_u64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret 
void
//
-void test_vsuxseg7ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei16(mask, base, bindex, v_tuple, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei32.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei32.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei32.c
@@ -7,523 +7,1251 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_f16mf4x7(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_f16mf2x7(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]],
i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_f16m1x7(_Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: 
[[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_f32mf2x7(float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void 
test_vsuxseg7ei32_v_f32m1x7(float *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_f64m1x7(double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i32.i64( [[V0]], 
[[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i8mf8x7(int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , 
, , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i8mf4x7(int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void 
test_vsuxseg7ei32_v_i8mf2x7(int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i8m1x7(int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], 
[[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i16mf4x7(int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i16mf2x7(int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); 
+void test_vsuxseg7ei32_v_i16m1x7(int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], <vscale x 1 x i32> [[V5:%.*]], <vscale x 1 x i32> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x i32> [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[V0]], <vscale x 1 x i32> [[V1]], <vscale x 1 x i32> [[V2]], <vscale x 1 x i32> [[V3]], <vscale x 1 x i32> [[V4]], <vscale x 1 x i32> [[V5]], <vscale x 1 x i32> [[V6]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } poison, <vscale x 1 x i32> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], <vscale x 1 x i32> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP1]], <vscale x 1 x i32> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP2]], <vscale x 1 x i32> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP3]], <vscale x 1 x i32> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP4]], <vscale x 1 x i32> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP5]], <vscale x 1 x i32> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32> [[TMP9]], <vscale x 1 x i32> [[TMP10]], <vscale x 1 x i32> [[TMP11]], <vscale x 1 x i32> [[TMP12]], <vscale x 1 x i32> [[TMP13]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
- return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_i32mf2x7(int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], <vscale x 2 x i32> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE3:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE4:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE5:%.*]], <vscale x 2 x i32> [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i32.i64( [[V0]], 
[[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i32m1x7(int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , 
, , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_i64m1x7(int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, 
v6, vl); +void test_vsuxseg7ei32_v_u8mf8x7(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_u8mf4x7(uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i32.i64( [[V0]], 
[[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_u8mf2x7(uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue 
{ , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_u8m1x7(uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, 
v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_u16mf4x7(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_u16mf2x7(uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_u16m1x7(uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_u32mf2x7(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t 
v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_u32m1x7(uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei32_v_u64m1x7(uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], <vscale x 1 x half> [[V3]], <vscale x 1 x half> [[V4]], <vscale x 1 x half> [[V5]], <vscale x 1 x half> [[V6]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], <vscale x 1 x half> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], <vscale x 1 x half> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], <vscale x 1 x half> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[TMP7]], <vscale x 1 x half> [[TMP8]], <vscale x 1 x half> [[TMP9]], <vscale x 1 x half> [[TMP10]], <vscale x 1 x half> [[TMP11]], <vscale x 1 x half> [[TMP12]], <vscale x 1 x half> [[TMP13]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf2_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16mf2x7_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE3:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE4:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE5:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0]], <vscale x 2 x half> [[V1]], <vscale x 2 x half> [[V2]], <vscale x 2 x half> [[V3]], <vscale x 2 x half> [[V4]], <vscale x 2 x half> [[V5]], <vscale x 2 x half> [[V6]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], <vscale x 2 x half> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], <vscale x 2 x half> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f16m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_f32mf2x7_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f32m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_f32m1x7_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_f64m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_f64m1x7_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf8x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf4x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i8m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf4x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i16m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i32m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_i64m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf8x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf4x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u8m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf4x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u16m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32mf2x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u32m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_u64m1x7_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei32_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei32(mask, base, bindex, v_tuple, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei64.c
@@ -7,523 +7,1251 @@

#include 

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf4x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei64_v_f16mf4x7(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf2x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
-  return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei64_v_f16mf2x7(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16m1x7
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]],
[[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_f16m1x7(_Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] 
= insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_f32mf2x7(float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_f32m1x7(float *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_f64m1x7(double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i8mf8x7(int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], 
[[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i8mf4x7(int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], 
[[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i8mf2x7(int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i8m1x7(int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i16mf4x7(int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], 
[[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i16mf2x7(int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], 
[[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i16m1x7(int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i32mf2x7(int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32m1x7 +// CHECK-RV64-SAME: 
(ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i32m1x7(int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , 
, , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i64m1x7(int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], 
[[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u8mf8x7(uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u8mf4x7(uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf2x7 +// CHECK-RV64-SAME: 
(ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u8mf2x7(uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , 
, , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u8m1x7(uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], 
[[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u16mf4x7(uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u16mf2x7(uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg7ei64_v_u16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u16m1x7(uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], 
[[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u32mf2x7(uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u32m1x7(uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u64m1x7(uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], 
[[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = 
extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, 
vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_f32mf2x7_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_f32m1x7_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_f64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_f64m1x7_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , 
, , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, 
v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], 
[[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsuxseg7ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: 
define dso_local void @test_vsuxseg7ei64_v_i32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_i64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: 
[[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void 
test_vsuxseg7ei64_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], 
[[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32mf2_m 
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_u64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei64_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei64(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg7ei8.c @@ -7,523 +7,1251 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , }
[[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_f16mf4x7(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_f16mf2x7(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_f16m1x7(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_f32mf2x7(float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , 
, } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_f32m1x7(float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_f64m1x7(double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg7ei8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i8mf8x7(int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , 
, , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i8mf4x7(int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } 
[[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i8mf2x7(int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i8m1x7(int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i16mf4x7(int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i16mf2x7(int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: 
[[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i16m1x7(int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i32mf2x7(int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i32m1x7(int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i64m1x7(int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf8x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: 
[[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i8.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u8mf8x7(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i8.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u8mf4x7(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i8.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u8mf2x7(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv8i8.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u8m1x7(uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf4x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] 
= extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i16.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u16mf4x7(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u16mf2x7(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16m1 -// CHECK-RV64-SAME: (ptr 
noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4i16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u16m1x7(uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32mf2x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i32.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u32mf2x7(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , 
} [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2i32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u32m1x7(uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u64m1x7 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u64m1x7(uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf4_m 
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_f16mf4x7_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], 
[[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_f16mf2x7_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , 
, } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_f16m1x7_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f32mf2_m(vbool64_t mask, float 
*base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_f32mf2x7_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_f32m1x7_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg7ei8_v_f64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_f64m1x7_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , 
, , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i8mf8x7_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { 
, , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i8mf4x7_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i8mf2x7_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x7_t 
v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i8m1x7_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i16mf4x7_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i16mf2x7_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], 
[[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i16m1x7_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i32mf2x7_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i32m1x7_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_i64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], 
[[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i64m1x7_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf8x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue 
{ , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u8mf8x7_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); 
+void test_vsuxseg7ei8_v_u8mf4x7_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u8mf2x7_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u8m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], 
[[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u8m1x7_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf4x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u16mf4x7_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u16mf2x7_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u16m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u16m1x7_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32mf2_m -// 
CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32mf2x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u32mf2x7_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u32m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , } [[TMP6]], 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , } [[TMP6]], 2 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , } [[TMP6]], 3 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , } [[TMP6]], 4 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , } [[TMP6]], 5 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , } [[TMP6]], 6 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[TMP7]], [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u32m1x7_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x7_t v_tuple, size_t vl) { + return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_u64m1x7_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP6]], 0 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 3
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 4
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 5
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP6]], 6
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64> [[TMP9]], <vscale x 1 x i64> [[TMP10]], <vscale x 1 x i64> [[TMP11]], <vscale x 1 x i64> [[TMP12]], <vscale x 1 x i64> [[TMP13]], ptr [[BASE]], <vscale x 1 x i8> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg7ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
- return __riscv_vsuxseg7ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_u64m1x7_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x7_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg7ei8(mask, base, bindex, v_tuple, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei16.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei16.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei16.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei16.c
@@ -7,523 +7,1355 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE6:%.*]], <vscale x 1 x half> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0]], <vscale x 1 x half> [[V1]], <vscale x 1 x half> [[V2]], <vscale x 1 x half> [[V3]], <vscale x 1 x half> [[V4]], <vscale x 1 x half> [[V5]], <vscale x 1 x half> [[V6]], <vscale x 1 x half> [[V7]], ptr [[BASE]], <vscale x 1 x i16> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], <vscale x 1 x half> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], <vscale x 1 x half> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], <vscale x 1 x half> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP6]], <vscale x 1 x half> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } 
[[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_f16mf4x8(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg8ei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_f16mf2x8(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_f16m1x8(_Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef 
[[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei16_v_f32mf2(float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_f32mf2x8(float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_f32m1x8(float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f64m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei16_v_f64m1(double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_f64m1x8(double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf8x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , 
, , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i8mf8x8(int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 
[[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i8mf4x8(int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i8mf2x8(int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i8m1x8(int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16.i64( 
[[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i16mf4x8(int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 
4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i16mf2x8(int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , 
, , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i16m1x8(int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg8ei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i32mf2x8(int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i32m1x8(int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i64m1x8(int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16.i64( [[V0]], 
[[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u8mf8x8(uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u8mf4x8(uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u8mf2x8(uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8m1(uint8_t 
*base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u8m1x8(uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u16mf4x8(uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u16mf2x8(uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16.i64( [[V0]], 
[[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u16m1(uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u16m1x8(uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u32mf2x8(uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , 
, , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u32m1x8(uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg8ei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u64m1x8(uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_f32mf2x8_m(vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_f32m1x8_m(vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_f64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: 
[[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_f64m1x8_m(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, 
v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], 
[[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], 
[[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , 
} [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t 
v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_i64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: 
[[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg8ei16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, 
base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei16_v_u64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei16(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei16(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei32.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei32.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei32.c @@ -7,523 +7,1355 @@ #include -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f16mf4x8(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], 
[[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f16mf2x8(_Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , 
, , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f16m1x8(_Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f32mf2(float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, 
vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f32mf2x8(float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f32m1x8(float *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg8ei32_v_f64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f64m1(double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f64m1x8(double *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i8mf8x8(int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , 
, , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i8mf4x8(int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i8mf2x8(int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return 
__riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i8m1x8(int8_t *base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i16mf4x8(int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf2x8 +// 
CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i16mf2x8(int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i16m1x8(int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i32mf2x8(int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: 
[[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i32m1x8(int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, 
bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei32_v_i64m1x8(int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], <vscale x 1 x i8> [[V6:%.*]], <vscale x 1 x i8> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf8x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE0:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE1:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE2:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE3:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE4:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE5:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE6:%.*]], <vscale x 1 x i8> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[V0]], <vscale x 1 x i8> [[V1]], <vscale x 1 x i8> [[V2]], <vscale x 1 x i8> [[V3]], <vscale x 1 x i8> [[V4]], <vscale x 1 x i8> [[V5]], <vscale x 1 x i8> [[V6]], <vscale x 1 x i8> [[V7]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } poison, <vscale x 1 x i8> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], <vscale x 1 x i8> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP1]], <vscale x 1 x i8> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP2]], <vscale x 1 x i8> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP3]], <vscale x 1 x i8> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP4]], <vscale x 1 x i8> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP5]], <vscale x 1 x i8> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP6]], <vscale x 1 x i8> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8> [[TMP9]], <vscale x 1 x i8> [[TMP10]], <vscale x 1 x i8> [[TMP11]], <vscale x 1 x i8> [[TMP12]], <vscale x 1 x i8> [[TMP13]], <vscale x 1 x i8> [[TMP14]], <vscale x 1 x i8> [[TMP15]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei32_v_u8mf8x8(uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], <vscale x 2 x i8> [[V6:%.*]], <vscale x 2 x i8> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], 
[[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u8mf4x8(uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { 
, , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u8mf2x8(uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , 
, , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u8m1x8(uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// 
CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u16mf4x8(uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, 
v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u16mf2x8(uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u16m1x8(uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u32mf2x8(uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue 
{ , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u32m1x8(uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , 
, , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u64m1x8(uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// 
CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[TMP8]], <vscale x 1 x half> [[TMP9]], <vscale x 1 x half> [[TMP10]], <vscale x 1 x half> [[TMP11]], <vscale x 1 x half> [[TMP12]], <vscale x 1 x half> [[TMP13]], <vscale x 1 x half> [[TMP14]], <vscale x 1 x half> [[TMP15]], ptr [[BASE]], <vscale x 1 x i32> [[BINDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
-  return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei32_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
+  return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf2_m
-// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16mf2x8_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE0:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE1:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE2:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE3:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE4:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE5:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE6:%.*]], <vscale x 2 x half> [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0]], <vscale x 2 x half> [[V1]], <vscale x 2 x half> [[V2]], <vscale x 2 x half> [[V3]], <vscale x 2 x half> [[V4]], <vscale x 2 x half> [[V5]], <vscale x 2 x half> [[V6]], <vscale x 2 x half> [[V7]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } poison, <vscale x 2 x half> [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], <vscale x 2 x half> [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP1]], <vscale x 2 x half> [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP2]], <vscale x 2 x half> [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP3]], <vscale x 2 x half> [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP4]], <vscale x 2 x half> [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP5]], <vscale x 2 x half> [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP6]], <vscale x 2 x half> [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[TMP8]], <vscale x 2 x half> [[TMP9]], <vscale x 2 x half> [[TMP10]], <vscale x 2 x half> [[TMP11]], <vscale x 2 x half> [[TMP12]], <vscale x 2 x half> [[TMP13]], <vscale x 2 x half> [[TMP14]], <vscale x 2 x half> [[TMP15]], ptr [[BASE]], <vscale x 2 x i32> [[BINDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_f16mf2_m(vbool32_t mask, _Float16 
*base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f32mf2x8_m(vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], 
[[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f32m1x8_m(vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_f64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , 
, , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_f64m1x8_m(vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[TMP8]], [[TMP9]], 
[[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i8m1x8_m(vbool8_t mask, int8_t 
*base, vuint32m4_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg8ei32_v_i16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], 
[[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: 
[[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_i64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = 
extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t 
v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], 
[[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , 
, , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: 
[[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg8ei32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei32_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei32(mask, 
base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_u64m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei32_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei32(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei32_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei32(mask, base, bindex, v_tuple, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei64.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei64.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei64.c
@@ -7,523 +7,1355 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei64_v_f16mf4x8(_Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl);
}
-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
[[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f16mf2x8(_Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f16m1x8(_Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// 
CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f32mf2(float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f32mf2x8(float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f32m1x8(float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f64m1x8(double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, 
size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i8mf8x8(int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], 
[[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i8mf4x8(int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i8mf2x8(int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = 
extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i8m1x8(int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i64.i64( [[TMP8]], [[TMP9]], 
[[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i16mf4x8(int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i16mf2x8(int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i16m1x8(int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i32mf2x8(int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } 
[[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i32m1x8(int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: 
[[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i64m1x8(int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], 
[[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8mf8x8(uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8mf4x8(uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg8ei64_v_u8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8mf2x8(uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8m1x8(uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u16mf4x8(uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] 
= extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u16mf2x8(uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], 
[[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u16m1x8(uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u32mf2x8(uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define 
dso_local void @test_vsuxseg8ei64_v_u32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u32m1x8(uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u64m1x8(uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], 
[[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = 
extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg8ei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f32mf2x8_m(vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f32m1x8_m(vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, 
base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_f64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_f64m1x8_m(vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue 
{ , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , 
, , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: 
[[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], 
[[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i16mf2x8_m(vbool32_t mask, 
int16_t *base, vuint64m2_t bindex, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void 
@test_vsuxseg8ei64_v_i32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], 
[[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_i64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = 
extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = 
extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, 
vuint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 
4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_u64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // 
-void test_vsuxseg8ei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei64(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei64_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei64(mask, base, bindex, v_tuple, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei8.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei8.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsuxseg8ei8.c @@ -7,523 +7,1355 @@ #include <riscv_vector.h> -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t
v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_f16mf4x8(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_f16mf2x8(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_f16m1x8(_Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32mf2x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], 
[[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_f32mf2x8(float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } 
[[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_f32m1x8(float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f64m1 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f64m1x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , 
, , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_f64m1x8(double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf8 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf8x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t 
v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i8mf8x8(int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf4 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf4x8 +// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i8mf4x8(int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf2 -// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i8mf2x8(int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i8m1x8(int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i16mf4x8(int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i16mf2x8(int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i16m1x8(int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i32mf2x8(int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i32m1x8(int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i64m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_i64m1x8(int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf8
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf8x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i8.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u8mf8x8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i8.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u8mf4x8(uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u8mf2x8(uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv8i8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u8m1x8(uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf4
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf4x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u16mf4x8(uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u16mf2x8(uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4i16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u16m1x8(uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32mf2
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32mf2x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u32mf2x8(uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2i32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u32m1x8(uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u64m1
-// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u64m1x8
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1i64.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u64m1x8(uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf4x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_f16mf4x8_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_f16mf2x8_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f16m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_f16m1x8_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl);
 }

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
, } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_f32mf2x8_m(vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_f32m1x8_m(vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_f64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, 
bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_f64m1x8_m(vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i8mf8x8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i8mf4x8_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( 
[[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i8mf2x8_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i8m1x8_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue 
{ , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i16mf4x8_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , 
, , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i16mf2x8_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i16m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return 
__riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i16m1x8_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i32mf2x8_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], 
[[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i32m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i32m1x8_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_i64m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i64m1x8_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf8x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue 
{ , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_u8mf8x8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { 
, , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_u8mf4x8_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8mf2x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } 
[[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_u8mf2x8_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u8m1x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, 
vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_u8m1x8_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf4x8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0 +// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1 +// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2 +// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3 +// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4 +// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5 +// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6 +// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7 +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_u16mf4x8_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4x8_t v_tuple, size_t vl) { + return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl); } -// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u16mf2x8_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u16m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u16m1x8_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32mf2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32mf2x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u32mf2x8_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u32m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u32m1x8_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl);
}

-// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_u64m1x8_m
+// CHECK-RV64-SAME: ( [[MASK:%.*]], ptr noundef [[BASE:%.*]], [[BINDEX:%.*]], [[V_TUPLE_COERCE0:%.*]], [[V_TUPLE_COERCE1:%.*]], [[V_TUPLE_COERCE2:%.*]], [[V_TUPLE_COERCE3:%.*]], [[V_TUPLE_COERCE4:%.*]], [[V_TUPLE_COERCE5:%.*]], [[V_TUPLE_COERCE6:%.*]], [[V_TUPLE_COERCE7:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64( [[V0]], [[V1]], [[V2]], [[V3]], [[V4]], [[V5]], [[V6]], [[V7]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { , , , , , , , } poison, [[V_TUPLE_COERCE0]], 0
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { , , , , , , , } [[TMP0]], [[V_TUPLE_COERCE1]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { , , , , , , , } [[TMP1]], [[V_TUPLE_COERCE2]], 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { , , , , , , , } [[TMP2]], [[V_TUPLE_COERCE3]], 3
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { , , , , , , , } [[TMP3]], [[V_TUPLE_COERCE4]], 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { , , , , , , , } [[TMP4]], [[V_TUPLE_COERCE5]], 5
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = insertvalue { , , , , , , , } [[TMP5]], [[V_TUPLE_COERCE6]], 6
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = insertvalue { , , , , , , , } [[TMP6]], [[V_TUPLE_COERCE7]], 7
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 0
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 1
+// CHECK-RV64-NEXT: [[TMP10:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 2
+// CHECK-RV64-NEXT: [[TMP11:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 3
+// CHECK-RV64-NEXT: [[TMP12:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 4
+// CHECK-RV64-NEXT: [[TMP13:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 5
+// CHECK-RV64-NEXT: [[TMP14:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 6
+// CHECK-RV64-NEXT: [[TMP15:%.*]] = extractvalue { , , , , , , , } [[TMP7]], 7
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64( [[TMP8]], [[TMP9]], [[TMP10]], [[TMP11]], [[TMP12]], [[TMP13]], [[TMP14]], [[TMP15]], ptr [[BASE]], [[BINDEX]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxseg8ei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
- return __riscv_vsuxseg8ei8(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_u64m1x8_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1x8_t v_tuple, size_t vl) {
+ return __riscv_vsuxseg8ei8(mask, base, bindex, v_tuple, vl);
}
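Every hunk above applies the same mechanical rewrite for each element type and LMUL: the eight per-field operands v0..v7 collapse into a single segment-tuple argument, and the regenerated checks verify the insertvalue/extractvalue coercion that unpacks the tuple's fields around the unchanged @llvm.riscv.vsuxseg8.mask intrinsic call. As an editorial sketch of the caller-side migration (not part of the autogenerated tests; it assumes the tuple-returning segment load __riscv_vlseg8e16_v_u16m1x8 from the same tuple-type revision of the intrinsics is available):

#include <riscv_vector.h>

// Sketch: gather 8-field records and scatter them to an indexed layout
// through the tuple-based segment intrinsics exercised above.
void copy_records_m(vbool16_t mask, uint16_t *dst, const uint16_t *src,
                    vuint8mf2_t bindex, size_t vl) {
  // One vuint16m1x8_t tuple now carries what used to be v0..v7.
  vuint16m1x8_t v_tuple = __riscv_vlseg8e16_v_u16m1x8(src, vl);
  // Masked indexed segment store, spelled exactly as in the test bodies:
  // the tuple replaces the former eight vector arguments.
  __riscv_vsuxseg8ei8(mask, dst, bindex, v_tuple, vl);
}

With the explicitly-typed (non-overloaded) spelling, the store would presumably read __riscv_vsuxseg8ei8_v_u16m1x8_m(mask, dst, bindex, v_tuple, vl), matching the test function names introduced by this patch.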